/* NOTE(review): machine-generated by Cython from cuda/bindings/runtime.pyx
 * (anon_struct25.__init__, pyx line 12594).  Prefer fixing the generator /
 * .pyx source and regenerating; this is a hand-applied correction.
 *
 * Python-level wrapper for anon_struct25.__init__(self, void_ptr _ptr):
 * unpacks the single `_ptr` argument (positional or keyword), converts it to
 * the C integer type void_ptr, and forwards to the typed implementation
 * __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_2__init__.
 * Returns 0 on success, -1 with a Python exception set on failure. */
int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr;
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[1] = {0};
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
#endif
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
{
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
/* FIX(review): was `if (unlikely(__pyx_kwds_len) < 0)`.  unlikely(x) expands
 * to __builtin_expect(!!(x), 0), which yields 0 or 1, so that comparison was
 * constant-false and a negative (error) return from __Pyx_NumKwargs_VARARGS
 * was silently ignored with a live exception set.  unlikely() must wrap the
 * whole comparison, as in the correctly generated getPtr wrapper below. */
if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 12594, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12594, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; }
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 12594, __pyx_L3_error)
for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, i); __PYX_ERR(0, 12594, __pyx_L3_error) } }
} else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error;
} else { values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12594, __pyx_L3_error) }
/* Convert the `_ptr` argument object to the C integer type void_ptr. */
__pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12594, __pyx_L3_error)
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 12594, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self), __pyx_v__ptr);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0]));
++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Cython-generated implementation of anon_struct25.__init__
 * (runtime.pyx:12594-12596).  Builds
 *   self._event = cudaEvent_t(_ptr=&self._pvt_ptr[0].launchCompletionEvent.event)
 * by converting the field address to a Python int and invoking the
 * cudaEvent_t extension type via a keyword-argument vectorcall.
 * The refcount choreography (GOTREF/GIVEREF/DECREF ordering) is
 * generator-controlled; do not reorder by hand. */
static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_2__init__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "cuda/bindings/runtime.pyx":12596 * def __init__(self, void_ptr _ptr): * pass * self._event = cudaEvent_t(_ptr=&self._pvt_ptr[0].launchCompletionEvent.event) # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).launchCompletionEvent.event))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ?
1 : 0)] = {__pyx_t_2, NULL}; __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12596, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12596, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_event); __Pyx_DECREF((PyObject *)__pyx_v_self->_event); __pyx_v_self->_event = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEvent_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12594 * self._pvt_ptr = _ptr * * def __init__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * pass * self._event = cudaEvent_t(_ptr=&self._pvt_ptr[0].launchCompletionEvent.event) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12597 * pass * self._event = cudaEvent_t(_ptr=&self._pvt_ptr[0].launchCompletionEvent.event) * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def getPtr(self): */ /* Python wrapper */
/* NOTE(review): __dealloc__ wrapper (pyx body is `pass`, i.e. a no-op);
 * its definition continues on the next source line. */
static void __pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5__dealloc__(PyObject
/* NOTE(review): continuation of the __dealloc__ wrapper begun on the previous
 * line, followed by the (empty) __dealloc__ impl, the getPtr wrapper/impl and
 * the start of the __repr__ wrapper.  All Cython-generated; the __Pyx_* names
 * are Cython runtime macros/helpers defined elsewhere in this file. */
*__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_4__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":12599 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return &self._pvt_ptr[0].launchCompletionEvent * def __repr__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_13anon_struct25_6getPtr, "anon_struct25.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13anon_struct25_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_13anon_struct25_6getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if
CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): getPtr implementation -- returns the address of
 * self._pvt_ptr[0].launchCompletionEvent as a Python int (void_ptr). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":12600 * pass * def getPtr(self): * return &self._pvt_ptr[0].launchCompletionEvent # <<<<<<<<<<<<<< * def __repr__(self): * if self._pvt_ptr is not NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).launchCompletionEvent))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12599 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return &self._pvt_ptr[0].launchCompletionEvent * def __repr__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.getPtr", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12601 * def getPtr(self): * return &self._pvt_ptr[0].launchCompletionEvent * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_9__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_9__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self) { PyObject *__pyx_v_str_list = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":12602 * return &self._pvt_ptr[0].launchCompletionEvent * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":12603 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += 
['event : ' + str(self.event)] */
/* NOTE(review): __repr__ body (runtime.pyx:12602-12614) -- builds str_list
 * with "event : <str(self.event)>" and "flags : <str(self.flags)>", each
 * attribute access guarded by try/except ValueError (falling back to a bare
 * label), then returns '\n'.join(str_list); returns '' when _pvt_ptr is NULL.
 * Exception save/reset state lives in __pyx_t_3..__pyx_t_5. */
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12603, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12604 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['event : ' + str(self.event)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12605 * str_list = [] * try: * str_list += ['event : ' + str(self.event)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['event : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_event_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12605, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12605, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_event, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12605, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12605, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 12605, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12605, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12604 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['event : ' + 
str(self.event)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12606 * try: * str_list += ['event : ' + str(self.event)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['event : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 12606, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":12607 * str_list += ['event : ' + str(self.event)] * except ValueError: * str_list += ['event : '] # <<<<<<<<<<<<<< * try: * str_list += ['flags : ' + str(self.flags)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12607, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_event_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_event_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_event_ValueError) != (0)) __PYX_ERR(0, 12607, __pyx_L6_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12607, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":12604 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< 
* str_list += ['event : ' + str(self.event)] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":12608 * except ValueError: * str_list += ['event : '] * try: # <<<<<<<<<<<<<< * str_list += ['flags : ' + str(self.flags)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":12609 * str_list += ['event : '] * try: * str_list += ['flags : ' + str(self.flags)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['flags : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_flags_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12609, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12609, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_flags, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12609, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12609, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 12609, __pyx_L12_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12609, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":12608 * except ValueError: * str_list += ['event : '] * try: # <<<<<<<<<<<<<< * str_list += ['flags : ' + str(self.flags)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L17_try_end; __pyx_L12_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12610 * try: * str_list += ['flags : ' + str(self.flags)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['flags : '] * return '\n'.join(str_list) */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 12610, __pyx_L14_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":12611 * str_list += ['flags : ' + str(self.flags)] * except ValueError: * str_list += ['flags : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12611, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_flags_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_flags_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_flags_ValueError) != (0)) __PYX_ERR(0, 12611, __pyx_L14_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12611, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_9); 
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L13_exception_handled; } goto __pyx_L14_except_error; /* "cuda/bindings/runtime.pyx":12608 * except ValueError: * str_list += ['event : '] * try: # <<<<<<<<<<<<<< * str_list += ['flags : ' + str(self.flags)] * except ValueError: */ __pyx_L14_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L13_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L17_try_end:; } /* "cuda/bindings/runtime.pyx":12612 * except ValueError: * str_list += ['flags : '] * return '\n'.join(str_list) # <<<<<<<<<<<<<< * else: * return '' */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12602 * return &self._pvt_ptr[0].launchCompletionEvent * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ } /* "cuda/bindings/runtime.pyx":12614 * return '\n'.join(str_list) * else: * return '' # <<<<<<<<<<<<<< * @property * def event(self): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3); __pyx_r = __pyx_mstate_global->__pyx_kp_u__3; goto __pyx_L0; } /* "cuda/bindings/runtime.pyx":12601 * def getPtr(self): * return &self._pvt_ptr[0].launchCompletionEvent * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); 
__Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_str_list); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): `event` property getter -- returns self._event (the
 * cudaEvent_t wrapper built in __init__) with a new reference. */
/* "cuda/bindings/runtime.pyx":12615 * else: * return '' * @property # <<<<<<<<<<<<<< * def event(self): * return self._event */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5event_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5event_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_5event___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_5event___get__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12617 * @property * def event(self): * return self._event # <<<<<<<<<<<<<< * @event.setter * def event(self, event): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_event); __pyx_r = ((PyObject *)__pyx_v_self->_event); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12615 * else: * return '' * @property # <<<<<<<<<<<<<< * def event(self): * return self._event */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12618 * def event(self): * 
return self._event * @event.setter # <<<<<<<<<<<<<< * def event(self, event): * cdef cyruntime.cudaEvent_t cyevent */ /* Python wrapper */
/* NOTE(review): `event` property setter (runtime.pyx:12618-12629).
 * Accepts None (-> 0), cudaEvent_t / driver.CUevent (int(event)), or any
 * other object coerced through cudaEvent_t(event) then int(); the resulting
 * handle is stored through self._event._pvt_ptr[0]. */
static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5event_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_event); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5event_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_event) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_5event_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self), ((PyObject *)__pyx_v_event)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Typed implementation of the setter; the wrapper above only casts self. */
static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_5event_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self, PyObject *__pyx_v_event) { cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; size_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12621 * def event(self, event): * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * cyevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_event == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":12622 * cdef cyruntime.cudaEvent_t cyevent * if event is None: * cyevent = 0 # <<<<<<<<<<<<<< * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) */ __pyx_v_cyevent = 
((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0)); /* "cuda/bindings/runtime.pyx":12621 * def event(self, event): * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * cyevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":12623 * if event is None: * cyevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * cyevent = pevent */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":12624 * cyevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) # <<<<<<<<<<<<<< * cyevent = pevent * else: */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_event); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12624, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pevent = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":12625 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) * cyevent = pevent # <<<<<<<<<<<<<< * else: * pevent = int(cudaEvent_t(event)) */ __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12625, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)); /* "cuda/bindings/runtime.pyx":12623 * if event is None: * cyevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * cyevent = pevent */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":12627 * cyevent = pevent * else: * pevent = int(cudaEvent_t(event)) # 
<<<<<<<<<<<<<< * cyevent = pevent * self._event._pvt_ptr[0] = cyevent */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_7 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_event}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+__pyx_t_7, (2-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12627, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_6 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12627, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pevent = ((PyObject*)__pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12628 * else: * pevent = int(cudaEvent_t(event)) * cyevent = pevent # <<<<<<<<<<<<<< * self._event._pvt_ptr[0] = cyevent * @property */ __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12628, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":12629 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * self._event._pvt_ptr[0] = cyevent # <<<<<<<<<<<<<< * @property * def flags(self): */ (__pyx_v_self->_event->__pyx_base._pvt_ptr[0]) = __pyx_v_cyevent; /* "cuda/bindings/runtime.pyx":12618 * def event(self): * return self._event * @event.setter # <<<<<<<<<<<<<< * def event(self, event): * cdef cyruntime.cudaEvent_t cyevent */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); 
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.event.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pevent); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12630 * cyevent = pevent * self._event._pvt_ptr[0] = cyevent * @property # <<<<<<<<<<<<<< * def flags(self): * return self._pvt_ptr[0].launchCompletionEvent.flags */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5flags_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5flags_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_5flags___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_5flags___get__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12632 * @property * def flags(self): * return self._pvt_ptr[0].launchCompletionEvent.flags # <<<<<<<<<<<<<< * @flags.setter * def flags(self, int flags): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_pvt_ptr[0]).launchCompletionEvent.flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12632, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12630 * cyevent = pevent * self._event._pvt_ptr[0] = cyevent 
* @property # <<<<<<<<<<<<<< * def flags(self): * return self._pvt_ptr[0].launchCompletionEvent.flags */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.flags.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12633 * def flags(self): * return self._pvt_ptr[0].launchCompletionEvent.flags * @flags.setter # <<<<<<<<<<<<<< * def flags(self, int flags): * self._pvt_ptr[0].launchCompletionEvent.flags = flags */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5flags_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_flags); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_5flags_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_flags) { int __pyx_v_flags; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_flags); { __pyx_v_flags = __Pyx_PyLong_As_int(__pyx_arg_flags); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12634, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.flags.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_5flags_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_5flags_2__set__(struct 
__pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self, int __pyx_v_flags) { int __pyx_r; /* "cuda/bindings/runtime.pyx":12635 * @flags.setter * def flags(self, int flags): * self._pvt_ptr[0].launchCompletionEvent.flags = flags # <<<<<<<<<<<<<< * * cdef class anon_struct26: */ (__pyx_v_self->_pvt_ptr[0]).launchCompletionEvent.flags = __pyx_v_flags; /* "cuda/bindings/runtime.pyx":12633 * def flags(self): * return self._pvt_ptr[0].launchCompletionEvent.flags * @flags.setter # <<<<<<<<<<<<<< * def flags(self, int flags): * self._pvt_ptr[0].launchCompletionEvent.flags = flags */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_13anon_struct25_10__reduce_cython__, "anon_struct25.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13anon_struct25_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_13anon_struct25_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const 
*__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; 
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_13anon_struct25_12__setstate_cython__, "anon_struct25.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13anon_struct25_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_13anon_struct25_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct25_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if 
CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct25_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct25.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12651 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * self._pvt_ptr = _ptr * */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject 
*__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12651, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12651, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 12651, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, i); __PYX_ERR(0, 12651, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12651, __pyx_L3_error) } __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && 
PyErr_Occurred())) __PYX_ERR(0, 12651, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 12651, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; /* "cuda/bindings/runtime.pyx":12652 * """ * def __cinit__(self, void_ptr _ptr): * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * * def __init__(self, void_ptr _ptr): */ __pyx_v_self->_pvt_ptr = ((union cudaLaunchAttributeValue *)__pyx_v__ptr); /* "cuda/bindings/runtime.pyx":12651 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * self._pvt_ptr = _ptr * */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":12654 * self._pvt_ptr = _ptr * * def __init__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * pass * self._devNode = cudaGraphDeviceNode_t(_ptr=&self._pvt_ptr[0].deviceUpdatableKernelNode.devNode) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_3__init__(PyObject *__pyx_v_self, 
PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12654, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12654, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 12654, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, i); __PYX_ERR(0, 12654, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12654, __pyx_L3_error) } __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr 
== (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12654, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 12654, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_2__init__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "cuda/bindings/runtime.pyx":12656 * def __init__(self, void_ptr _ptr): * pass * self._devNode = cudaGraphDeviceNode_t(_ptr=&self._pvt_ptr[0].deviceUpdatableKernelNode.devNode) # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphDeviceNode_t); __pyx_t_3 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphDeviceNode_t); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).deviceUpdatableKernelNode.devNode))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12656, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, NULL}; __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12656, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12656, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12656, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_devNode); __Pyx_DECREF((PyObject *)__pyx_v_self->_devNode); __pyx_v_self->_devNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphDeviceNode_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12654 * self._pvt_ptr = _ptr * * def __init__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * pass * self._devNode = cudaGraphDeviceNode_t(_ptr=&self._pvt_ptr[0].deviceUpdatableKernelNode.devNode) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12657 * pass * self._devNode = cudaGraphDeviceNode_t(_ptr=&self._pvt_ptr[0].deviceUpdatableKernelNode.devNode) * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def getPtr(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_5__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_5__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_4__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":12659 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return &self._pvt_ptr[0].deviceUpdatableKernelNode * def __repr__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_13anon_struct26_6getPtr, "anon_struct26.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13anon_struct26_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_13anon_struct26_6getPtr}; static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":12660 * pass * def getPtr(self): * return &self._pvt_ptr[0].deviceUpdatableKernelNode # <<<<<<<<<<<<<< * def __repr__(self): * if self._pvt_ptr is not NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = 
__Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).deviceUpdatableKernelNode))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12660, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12659 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return &self._pvt_ptr[0].deviceUpdatableKernelNode * def __repr__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12661 * def getPtr(self): * return &self._pvt_ptr[0].deviceUpdatableKernelNode * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_9__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_9__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self) { PyObject *__pyx_v_str_list = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject 
*__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":12662 * return &self._pvt_ptr[0].deviceUpdatableKernelNode * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":12663 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += ['deviceUpdatable : ' + str(self.deviceUpdatable)] */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12663, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12664 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['deviceUpdatable : ' + str(self.deviceUpdatable)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12665 * str_list = [] * try: * str_list += ['deviceUpdatable : ' + str(self.deviceUpdatable)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['deviceUpdatable : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_deviceUpdatable_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12665, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12665, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_deviceUpdatable, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12665, 
__pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12665, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 12665, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12665, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12664 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['deviceUpdatable : ' + str(self.deviceUpdatable)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12666 * try: * str_list += ['deviceUpdatable : ' + str(self.deviceUpdatable)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['deviceUpdatable : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 12666, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":12667 * str_list += ['deviceUpdatable : ' + str(self.deviceUpdatable)] * except ValueError: * str_list += ['deviceUpdatable : '] # <<<<<<<<<<<<<< * try: * str_list += ['devNode : ' + str(self.devNode)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12667, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_9); 
__Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_deviceUpdatable_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_deviceUpdatable_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_deviceUpdatable_ValueError) != (0)) __PYX_ERR(0, 12667, __pyx_L6_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12667, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":12664 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['deviceUpdatable : ' + str(self.deviceUpdatable)] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":12668 * except ValueError: * str_list += ['deviceUpdatable : '] * try: # <<<<<<<<<<<<<< * str_list += ['devNode : ' + str(self.devNode)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":12669 * str_list += ['deviceUpdatable : '] * try: * str_list += ['devNode : ' + str(self.devNode)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['devNode : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), 
__pyx_mstate_global->__pyx_n_u_devNode_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12669, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12669, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_devNode, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12669, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12669, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 12669, __pyx_L12_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12669, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":12668 * except ValueError: * str_list += ['deviceUpdatable : '] * try: # <<<<<<<<<<<<<< * str_list += ['devNode : ' + str(self.devNode)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L17_try_end; __pyx_L12_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12670 * try: * str_list += ['devNode : ' + str(self.devNode)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['devNode : '] * return '\n'.join(str_list) */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if 
(__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 12670, __pyx_L14_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":12671 * str_list += ['devNode : ' + str(self.devNode)] * except ValueError: * str_list += ['devNode : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12671, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_devNode_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_devNode_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_devNode_ValueError) != (0)) __PYX_ERR(0, 12671, __pyx_L14_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12671, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L13_exception_handled; } goto __pyx_L14_except_error; /* "cuda/bindings/runtime.pyx":12668 * except ValueError: * str_list += ['deviceUpdatable : '] * try: # <<<<<<<<<<<<<< * str_list += ['devNode : ' + str(self.devNode)] * except ValueError: */ __pyx_L14_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L13_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L17_try_end:; } /* "cuda/bindings/runtime.pyx":12672 * except ValueError: * str_list += ['devNode : '] * return '\n'.join(str_list) # <<<<<<<<<<<<<< * else: * return '' */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = 
/* NOTE(review): machine-generated by Cython from cuda/bindings/runtime.pyx.
 * Do not patch this C by hand -- fix the .pyx and regenerate.
 * The fragment below is the TAIL of anon_struct26.__repr__ (the function body
 * begins on an earlier line): it returns '\n'.join(str_list) when _pvt_ptr is
 * non-NULL, '' otherwise, then runs the shared cleanup/exit sequence. */
PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":12662 * return &self._pvt_ptr[0].deviceUpdatableKernelNode * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */
}
/* "cuda/bindings/runtime.pyx":12674 * return '\n'.join(str_list) * else: * return '' # <<<<<<<<<<<<<< * @property * def deviceUpdatable(self): */
/* else-branch: _pvt_ptr is NULL, so there is nothing to format -- return ''. */
/*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3); __pyx_r = __pyx_mstate_global->__pyx_kp_u__3; goto __pyx_L0; }
/* "cuda/bindings/runtime.pyx":12661 * def getPtr(self): * return &self._pvt_ptr[0].deviceUpdatableKernelNode * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */
/* function exit code */
/* Error path drops every live temporary, records the traceback, and returns NULL. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_str_list);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":12675 * else: * return '' * @property # <<<<<<<<<<<<<< * def deviceUpdatable(self): * return self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable */
/* anon_struct26.deviceUpdatable property: tp_getset getter wrapper. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_15deviceUpdatable_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_15deviceUpdatable_1__get__(PyObject *__pyx_v_self) {
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
/* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this no-arg getter;
 * presumably __Pyx_KwValues_VARARGS expands without evaluating its arguments --
 * confirm against the Cython utility-code definition for this Cython version. */
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_15deviceUpdatable___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Getter implementation: boxes the C int field
 * _pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_15deviceUpdatable___get__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "cuda/bindings/runtime.pyx":12677 * @property * def deviceUpdatable(self): * return self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable # <<<<<<<<<<<<<< * @deviceUpdatable.setter * def deviceUpdatable(self, int deviceUpdatable): */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_pvt_ptr[0]).deviceUpdatableKernelNode.deviceUpdatable); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12677, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":12675 * else: * return '' * @property # <<<<<<<<<<<<<< * def deviceUpdatable(self): * return self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.deviceUpdatable.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":12678 * def deviceUpdatable(self): * return self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable * @deviceUpdatable.setter # <<<<<<<<<<<<<< * def deviceUpdatable(self, int deviceUpdatable): * self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable = deviceUpdatable */
/* deviceUpdatable setter wrapper: converts the incoming Python object to C int
 * via __Pyx_PyLong_As_int (TypeError/OverflowError surface through __pyx_L3_error). */
/* Python wrapper */
static int
__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_15deviceUpdatable_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_deviceUpdatable); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_15deviceUpdatable_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_deviceUpdatable) {
int __pyx_v_deviceUpdatable;
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
assert(__pyx_arg_deviceUpdatable);
{ __pyx_v_deviceUpdatable = __Pyx_PyLong_As_int(__pyx_arg_deviceUpdatable); if (unlikely((__pyx_v_deviceUpdatable == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12679, __pyx_L3_error) }
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.deviceUpdatable.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_15deviceUpdatable_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self), ((int)__pyx_v_deviceUpdatable));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Setter implementation: plain C store into the wrapped struct field. */
static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_15deviceUpdatable_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self, int __pyx_v_deviceUpdatable) {
int __pyx_r;
/* "cuda/bindings/runtime.pyx":12680 * @deviceUpdatable.setter * def deviceUpdatable(self, int deviceUpdatable): * self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable = deviceUpdatable # <<<<<<<<<<<<<< * @property * def devNode(self): */
(__pyx_v_self->_pvt_ptr[0]).deviceUpdatableKernelNode.deviceUpdatable = __pyx_v_deviceUpdatable;
/* "cuda/bindings/runtime.pyx":12678 * def deviceUpdatable(self): * return self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable * @deviceUpdatable.setter # <<<<<<<<<<<<<< * def deviceUpdatable(self, int deviceUpdatable): * self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable = deviceUpdatable */
/* function exit code */
__pyx_r = 0;
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":12681 * def deviceUpdatable(self, int deviceUpdatable): * self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable = deviceUpdatable * @property # <<<<<<<<<<<<<< * def devNode(self): * return self._devNode */
/* anon_struct26.devNode getter wrapper + implementation: returns the cached
 * Python wrapper object self._devNode (new reference). */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_7devNode_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_7devNode_1__get__(PyObject *__pyx_v_self) {
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_7devNode___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_7devNode___get__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "cuda/bindings/runtime.pyx":12683 * @property * def devNode(self): * return self._devNode # <<<<<<<<<<<<<< * @devNode.setter * def devNode(self, devNode): */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF((PyObject *)__pyx_v_self->_devNode);
__pyx_r = ((PyObject *)__pyx_v_self->_devNode);
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":12681 * def deviceUpdatable(self, int deviceUpdatable): * self._pvt_ptr[0].deviceUpdatableKernelNode.deviceUpdatable = deviceUpdatable * @property # <<<<<<<<<<<<<< * def devNode(self): * return self._devNode */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":12684 * def devNode(self): * return self._devNode * @devNode.setter # <<<<<<<<<<<<<< * def devNode(self, devNode): * cdef cyruntime.cudaGraphDeviceNode_t cydevNode */
/* anon_struct26.devNode setter wrapper. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_7devNode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_devNode); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_7devNode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_devNode) {
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_7devNode_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self), ((PyObject *)__pyx_v_devNode));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* devNode setter implementation. Coercion rules (from the .pyx source shown
 * in the banner comments): None -> handle 0; a cudaGraphDeviceNode_t instance
 * -> int(devNode); anything else -> int(cudaGraphDeviceNode_t(devNode)).
 * The resulting integer is stored as the raw handle through
 * self._devNode._pvt_ptr[0] (conversion goes via unsigned PY_LONG_LONG). */
static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_7devNode_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self, PyObject *__pyx_v_devNode) {
cudaGraphDeviceNode_t __pyx_v_cydevNode;
PyObject *__pyx_v_pdevNode = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
__pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
size_t __pyx_t_6;
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__set__", 0);
/* "cuda/bindings/runtime.pyx":12687 * def devNode(self, devNode): * cdef cyruntime.cudaGraphDeviceNode_t cydevNode * if devNode is None: # <<<<<<<<<<<<<< * cydevNode = 0 * elif isinstance(devNode, (cudaGraphDeviceNode_t,)): */
__pyx_t_1 = (__pyx_v_devNode == Py_None);
if (__pyx_t_1) {
/* "cuda/bindings/runtime.pyx":12688 * cdef cyruntime.cudaGraphDeviceNode_t cydevNode * if devNode is None: * cydevNode = 0 # <<<<<<<<<<<<<< * elif isinstance(devNode, (cudaGraphDeviceNode_t,)): * pdevNode = int(devNode) */
__pyx_v_cydevNode = ((cudaGraphDeviceNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0));
/* "cuda/bindings/runtime.pyx":12687 * def devNode(self, devNode): * cdef cyruntime.cudaGraphDeviceNode_t cydevNode * if devNode is None: # <<<<<<<<<<<<<< * cydevNode = 0 * elif isinstance(devNode, (cudaGraphDeviceNode_t,)): */
goto __pyx_L3;
}
/* "cuda/bindings/runtime.pyx":12689 * if devNode is None: * cydevNode = 0 * elif isinstance(devNode, (cudaGraphDeviceNode_t,)): # <<<<<<<<<<<<<< * pdevNode = int(devNode) * cydevNode = pdevNode */
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_devNode, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphDeviceNode_t);
if (__pyx_t_1) {
/* "cuda/bindings/runtime.pyx":12690 * cydevNode = 0 * elif isinstance(devNode, (cudaGraphDeviceNode_t,)): * pdevNode = int(devNode) # <<<<<<<<<<<<<< * cydevNode = pdevNode * else: */
__pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_devNode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12690, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_pdevNode = ((PyObject*)__pyx_t_2);
__pyx_t_2 = 0;
/* "cuda/bindings/runtime.pyx":12691 * elif isinstance(devNode, (cudaGraphDeviceNode_t,)): * pdevNode = int(devNode) * cydevNode = pdevNode # <<<<<<<<<<<<<< * else: * pdevNode = int(cudaGraphDeviceNode_t(devNode)) */
__pyx_t_3 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pdevNode); if (unlikely((__pyx_t_3 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12691, __pyx_L1_error)
__pyx_v_cydevNode = ((cudaGraphDeviceNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_3));
/* "cuda/bindings/runtime.pyx":12689 * if devNode is None: * cydevNode = 0 * elif isinstance(devNode, (cudaGraphDeviceNode_t,)): # <<<<<<<<<<<<<< * pdevNode = int(devNode) * cydevNode = pdevNode */
goto __pyx_L3;
}
/* "cuda/bindings/runtime.pyx":12693 * cydevNode = pdevNode * else: * pdevNode = int(cudaGraphDeviceNode_t(devNode)) # <<<<<<<<<<<<<< * cydevNode = pdevNode * self._devNode._pvt_ptr[0] = cydevNode */
/* Fallback: construct cudaGraphDeviceNode_t(devNode) via vectorcall, then int() it. */
/*else*/ {
__pyx_t_4 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphDeviceNode_t);
__pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphDeviceNode_t);
__pyx_t_6 = 1;
{ PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_devNode};
__pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12693, __pyx_L1_error)
__Pyx_GOTREF((PyObject *)__pyx_t_2);
}
__pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12693, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0;
__pyx_v_pdevNode = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
/* "cuda/bindings/runtime.pyx":12694 * else: * pdevNode = int(cudaGraphDeviceNode_t(devNode)) * cydevNode = pdevNode # <<<<<<<<<<<<<< * self._devNode._pvt_ptr[0] = cydevNode * */
__pyx_t_3 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pdevNode); if (unlikely((__pyx_t_3 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12694, __pyx_L1_error)
__pyx_v_cydevNode = ((cudaGraphDeviceNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_3));
}
__pyx_L3:;
/* "cuda/bindings/runtime.pyx":12695 * pdevNode = int(cudaGraphDeviceNode_t(devNode)) * cydevNode = pdevNode * self._devNode._pvt_ptr[0] = cydevNode # <<<<<<<<<<<<<< * * cdef class cudaLaunchAttributeValue: */
(__pyx_v_self->_devNode->_pvt_ptr[0]) = __pyx_v_cydevNode;
/* "cuda/bindings/runtime.pyx":12684 * def devNode(self): * return self._devNode * @devNode.setter # <<<<<<<<<<<<<< * def devNode(self, devNode): * cdef cyruntime.cudaGraphDeviceNode_t cydevNode */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.devNode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_pdevNode);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */
/* anon_struct26.__reduce_cython__: pickling is disabled for this type
 * (non-trivial __cinit__). Wrapper begins here; its body continues on the
 * following chunk line. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_13anon_struct26_10__reduce_cython__, "anon_struct26.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13anon_struct26_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_13anon_struct26_10__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
/* NOTE(review): Cython-generated; this is the continuation of the
 * __reduce_cython__ wrapper begun on the previous chunk line (the #else
 * branch of the CYTHON_ASSUME_SAFE_SIZE argument-count probe). */
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* __reduce_cython__ takes no arguments: reject any positionals or keywords. */
if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len < 0)) return NULL;
if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __reduce_cython__ implementation: unconditionally raises TypeError --
 * instances are not picklable because __cinit__ wires up internal pointers. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */
__Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
__PYX_ERR(2, 2, __pyx_L1_error)
/* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */
/* anon_struct26.__setstate_cython__ wrapper: accepts exactly one positional
 * or keyword argument (__pyx_state); the implementation below always raises. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_13__setstate_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_13anon_struct26_12__setstate_cython__, "anon_struct26.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13anon_struct26_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_13anon_struct26_12__setstate_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct26_13__setstate_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[1] = {0};
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* Argument unpacking for the single __pyx_state parameter.
 * NOTE(review): 'unlikely(__pyx_kwds_len) < 0' parenthesizes the branch hint
 * before the comparison; semantically identical to 'unlikely(__pyx_kwds_len < 0)'
 * since __builtin_expect returns its first argument. */
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
switch (__pyx_nargs) {
case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error)
for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } }
} else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error;
} else {
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error)
}
__pyx_v___pyx_state = values[0];
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_self), __pyx_v___pyx_state);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __setstate_cython__ implementation: unconditionally raises TypeError
 * (same non-picklable policy as __reduce_cython__ above). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct26_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */
__Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
__PYX_ERR(2, 4, __pyx_L1_error)
/* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cuda.bindings.runtime.anon_struct26.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":12780 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */
/* cudaLaunchAttributeValue.__cinit__ Python wrapper: accepts one optional
 * argument _ptr (void_ptr); the body continues on the next chunk line. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
/* NOTE(review): Cython-generated; continuation of the
 * cudaLaunchAttributeValue.__cinit__ wrapper begun on the previous chunk line
 * (local declarations and unpacking of the optional _ptr keyword). */
__pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr;
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[1] = {0};
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
#endif
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
/* NOTE(review): 'unlikely(__pyx_kwds_len) < 0' applies the branch hint before
 * the comparison; semantically identical to 'unlikely(__pyx_kwds_len < 0)'. */
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12780, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
switch (__pyx_nargs) {
case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12780, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 12780, __pyx_L3_error)
} else {
switch (__pyx_nargs) {
case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12780, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
}
/* _ptr provided -> coerce Python int to the C pointer-sized integer; otherwise default 0. */
if (values[0]) {
__pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12780, __pyx_L3_error)
} else {
__pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0);
}
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 12780, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), __pyx_v__ptr);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __cinit__ implementation: _ptr == 0 -> point _pvt_ptr at the embedded
 * _pvt_val union (self-owned storage); otherwise reinterpret _ptr as an
 * external union cudaLaunchAttributeValue* (caller-owned; no lifetime
 * tracking is done here). */
static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) {
int __pyx_r;
int __pyx_t_1;
/* "cuda/bindings/runtime.pyx":12781 * """ * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */
__pyx_t_1 = (__pyx_v__ptr == 0);
if (__pyx_t_1) {
/* "cuda/bindings/runtime.pyx":12782 * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */
__pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val);
/* "cuda/bindings/runtime.pyx":12781 * """ * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */
goto __pyx_L3;
}
/* "cuda/bindings/runtime.pyx":12784 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * def __init__(self, void_ptr _ptr = 0): * pass */
/*else*/ { __pyx_v_self->_pvt_ptr = ((union cudaLaunchAttributeValue *)__pyx_v__ptr); }
__pyx_L3:;
/* "cuda/bindings/runtime.pyx":12780 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */
/* function exit code */
__pyx_r = 0;
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":12785 * else: * self._pvt_ptr = _ptr * def __init__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * pass * self._accessPolicyWindow = cudaAccessPolicyWindow(_ptr=&self._pvt_ptr[0].accessPolicyWindow) */
/* cudaLaunchAttributeValue.__init__ Python wrapper: same optional-_ptr
 * protocol as __cinit__ above; the unpacked value is CYTHON_UNUSED by the
 * implementation (the pointer was already set in __cinit__). */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr;
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[1] = {0};
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
#endif
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12785, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
switch (__pyx_nargs) {
case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12785, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 12785, __pyx_L3_error)
} else {
switch (__pyx_nargs) {
case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12785, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
}
if (values[0]) {
__pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12785, __pyx_L3_error)
} else {
__pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0);
}
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 12785, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), __pyx_v__ptr);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* __init__ implementation (INCOMPLETE in this view -- it continues past the
 * end of this chunk): builds the child view objects over the union's members,
 * starting with cudaAccessPolicyWindow(_ptr=&self._pvt_ptr[0].accessPolicyWindow). */
static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_2__init__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
size_t __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__init__", 0);
/* "cuda/bindings/runtime.pyx":12787 * def __init__(self, void_ptr _ptr = 0): * pass * self._accessPolicyWindow = cudaAccessPolicyWindow(_ptr=&self._pvt_ptr[0].accessPolicyWindow) # <<<<<<<<<<<<<< * self._clusterDim = anon_struct22(_ptr=self._pvt_ptr) * self._programmaticEvent = anon_struct23(_ptr=self._pvt_ptr) */
__pyx_t_2 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAccessPolicyWindow);
__pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAccessPolicyWindow);
__pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).accessPolicyWindow))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12787, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = 1;
{ PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ?
/* NOTE(review): continuation of the generated cudaLaunchAttributeValue
 * __init__ implementation (Cython output -- do not hand-edit; regenerate
 * from runtime.pyx).  Each stanza below is the same one-keyword vectorcall
 * pattern, once per cached member: build a {"_ptr": <address-as-int>} call
 * to the member view type's constructor, passing either
 * &self->_pvt_ptr[0].<member> or self->_pvt_ptr itself, then replace the
 * old cached object in self->_<member> with the new one.  A shared L1_error
 * path at the end decrefs all temporaries and records a traceback.  Code is
 * byte-identical; only this comment was added. */
1 : 0)] = {__pyx_t_2, NULL}; __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12787, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12787, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12787, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_accessPolicyWindow); __Pyx_DECREF((PyObject *)__pyx_v_self->_accessPolicyWindow); __pyx_v_self->_accessPolicyWindow = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAccessPolicyWindow *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12788 * pass * self._accessPolicyWindow = cudaAccessPolicyWindow(_ptr=&self._pvt_ptr[0].accessPolicyWindow) * self._clusterDim = anon_struct22(_ptr=self._pvt_ptr) # <<<<<<<<<<<<<< * self._programmaticEvent = anon_struct23(_ptr=self._pvt_ptr) * self._memSyncDomainMap = cudaLaunchMemSyncDomainMap(_ptr=&self._pvt_ptr[0].memSyncDomainMap) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct22); __pyx_t_6 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct22); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12788, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 
1 : 0)] = {__pyx_t_3, NULL}; __pyx_t_2 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12788, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_2, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12788, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_6, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12788, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_clusterDim); __Pyx_DECREF((PyObject *)__pyx_v_self->_clusterDim); __pyx_v_self->_clusterDim = ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct22 *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12789 * self._accessPolicyWindow = cudaAccessPolicyWindow(_ptr=&self._pvt_ptr[0].accessPolicyWindow) * self._clusterDim = anon_struct22(_ptr=self._pvt_ptr) * self._programmaticEvent = anon_struct23(_ptr=self._pvt_ptr) # <<<<<<<<<<<<<< * self._memSyncDomainMap = cudaLaunchMemSyncDomainMap(_ptr=&self._pvt_ptr[0].memSyncDomainMap) * self._preferredClusterDim = anon_struct24(_ptr=self._pvt_ptr) */ __pyx_t_6 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct23); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct23); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 
1 : 0)] = {__pyx_t_6, NULL}; __pyx_t_3 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_3, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12789, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_2, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12789, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_programmaticEvent); __Pyx_DECREF((PyObject *)__pyx_v_self->_programmaticEvent); __pyx_v_self->_programmaticEvent = ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct23 *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12790 * self._clusterDim = anon_struct22(_ptr=self._pvt_ptr) * self._programmaticEvent = anon_struct23(_ptr=self._pvt_ptr) * self._memSyncDomainMap = cudaLaunchMemSyncDomainMap(_ptr=&self._pvt_ptr[0].memSyncDomainMap) # <<<<<<<<<<<<<< * self._preferredClusterDim = anon_struct24(_ptr=self._pvt_ptr) * self._launchCompletionEvent = anon_struct25(_ptr=self._pvt_ptr) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaLaunchMemSyncDomainMap); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaLaunchMemSyncDomainMap); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).memSyncDomainMap))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12790, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 
1 : 0)] = {__pyx_t_2, NULL}; __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12790, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12790, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12790, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_memSyncDomainMap); __Pyx_DECREF((PyObject *)__pyx_v_self->_memSyncDomainMap); __pyx_v_self->_memSyncDomainMap = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchMemSyncDomainMap *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12791 * self._programmaticEvent = anon_struct23(_ptr=self._pvt_ptr) * self._memSyncDomainMap = cudaLaunchMemSyncDomainMap(_ptr=&self._pvt_ptr[0].memSyncDomainMap) * self._preferredClusterDim = anon_struct24(_ptr=self._pvt_ptr) # <<<<<<<<<<<<<< * self._launchCompletionEvent = anon_struct25(_ptr=self._pvt_ptr) * self._deviceUpdatableKernelNode = anon_struct26(_ptr=self._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct24); __pyx_t_6 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct24); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12791, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 
1 : 0)] = {__pyx_t_3, NULL}; __pyx_t_2 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12791, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_2, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12791, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_6, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12791, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_preferredClusterDim); __Pyx_DECREF((PyObject *)__pyx_v_self->_preferredClusterDim); __pyx_v_self->_preferredClusterDim = ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct24 *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12792 * self._memSyncDomainMap = cudaLaunchMemSyncDomainMap(_ptr=&self._pvt_ptr[0].memSyncDomainMap) * self._preferredClusterDim = anon_struct24(_ptr=self._pvt_ptr) * self._launchCompletionEvent = anon_struct25(_ptr=self._pvt_ptr) # <<<<<<<<<<<<<< * self._deviceUpdatableKernelNode = anon_struct26(_ptr=self._pvt_ptr) * def __dealloc__(self): */ __pyx_t_6 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct25); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct25); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 
1 : 0)] = {__pyx_t_6, NULL}; __pyx_t_3 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_3, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12792, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_2, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12792, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_launchCompletionEvent); __Pyx_DECREF((PyObject *)__pyx_v_self->_launchCompletionEvent); __pyx_v_self->_launchCompletionEvent = ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12793 * self._preferredClusterDim = anon_struct24(_ptr=self._pvt_ptr) * self._launchCompletionEvent = anon_struct25(_ptr=self._pvt_ptr) * self._deviceUpdatableKernelNode = anon_struct26(_ptr=self._pvt_ptr) # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct26); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct26); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12793, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 
1 : 0)] = {__pyx_t_2, NULL}; __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12793, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12793, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12793, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_deviceUpdatableKernelNode); __Pyx_DECREF((PyObject *)__pyx_v_self->_deviceUpdatableKernelNode); __pyx_v_self->_deviceUpdatableKernelNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12785 * else: * self._pvt_ptr = _ptr * def __init__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * pass * self._accessPolicyWindow = cudaAccessPolicyWindow(_ptr=&self._pvt_ptr[0].accessPolicyWindow) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12794 * self._launchCompletionEvent = anon_struct25(_ptr=self._pvt_ptr) * self._deviceUpdatableKernelNode = anon_struct26(_ptr=self._pvt_ptr) * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def getPtr(self): */ /* Python wrapper */ static void 
/* NOTE(review): generated __dealloc__ wrapper/impl (both no-ops beyond
 * RefNanny bookkeeping), getPtr (returns the raw _pvt_ptr address as a
 * Python int), and the __repr__ wrapper.  The
 * "__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs)" line below names
 * identifiers not declared in this wrapper -- presumably the macro ignores
 * its arguments (TODO confirm against Cython's utility-code header).
 * Code is byte-identical; only comments were inserted. */
__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_5__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_5__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_4__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":12796 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * def __repr__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_6getPtr, "cudaLaunchAttributeValue.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_6getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject 
*__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* getPtr implementation: converts self->_pvt_ptr to a Python int via
 * __Pyx_PyLong_From_unsigned_PY_LONG_LONG and returns it. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":12797 * pass * def getPtr(self): * return self._pvt_ptr # <<<<<<<<<<<<<< * def __repr__(self): * if self._pvt_ptr is not NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12796 * def __dealloc__(self): 
* pass * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * def __repr__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* __repr__ wrapper (simple dispatch) followed by the start of the __repr__
 * implementation, which continues beyond this span: it builds a str_list of
 * "field : value" lines, guarding each attribute access with a ValueError
 * fallback. */
/* "cuda/bindings/runtime.pyx":12798 * def getPtr(self): * return self._pvt_ptr * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_9__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_9__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_v_str_list = NULL; PyObject *__pyx_10genexpr169__pyx_v_line = NULL; PyObject *__pyx_10genexpr170__pyx_v_line = NULL; PyObject *__pyx_10genexpr171__pyx_v_line = NULL; PyObject *__pyx_10genexpr172__pyx_v_line = NULL; PyObject *__pyx_10genexpr173__pyx_v_line = NULL; PyObject *__pyx_10genexpr174__pyx_v_line = NULL; PyObject *__pyx_10genexpr175__pyx_v_line = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; 
PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":12799 * return self._pvt_ptr * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":12800 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += ['pad : ' + str(self.pad)] */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12800, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12801 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['pad : ' + str(self.pad)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12802 * str_list = [] * try: * str_list += ['pad : ' + str(self.pad)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['pad : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_pad_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12802, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12802, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_pad, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12802, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(0, 12802, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 12802, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12802, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12801 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['pad : ' + str(self.pad)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12803 * try: * str_list += ['pad : ' + str(self.pad)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['pad : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 12803, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":12804 * str_list += ['pad : ' + str(self.pad)] * except ValueError: * str_list += ['pad : '] # <<<<<<<<<<<<<< * try: * str_list += ['accessPolicyWindow :\n' + '\n'.join([' ' + line for line in str(self.accessPolicyWindow).splitlines()])] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12804, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_pad_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_pad_ValueError); if 
(__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_pad_ValueError) != (0)) __PYX_ERR(0, 12804, __pyx_L6_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12804, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":12801 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['pad : ' + str(self.pad)] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":12805 * except ValueError: * str_list += ['pad : '] * try: # <<<<<<<<<<<<<< * str_list += ['accessPolicyWindow :\n' + '\n'.join([' ' + line for line in str(self.accessPolicyWindow).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":12806 * str_list += ['pad : '] * try: * str_list += ['accessPolicyWindow :\n' + '\n'.join([' ' + line for line in str(self.accessPolicyWindow).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['accessPolicyWindow : '] */ { /* enter inner scope */ __pyx_t_8 = PyList_New(0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12806, __pyx_L20_error) 
__Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_accessPolicyWindow_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12806, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12806, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyUnicode_Splitlines(((PyObject*)__pyx_t_2), 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12806, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_t_6; __Pyx_INCREF(__pyx_t_2); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12806, __pyx_L20_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_2, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12806, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_10genexpr169__pyx_v_line, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr169__pyx_v_line); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12806, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_8, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 12806, __pyx_L20_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_10genexpr169__pyx_v_line); __pyx_10genexpr169__pyx_v_line = 0; goto __pyx_L24_exit_scope; __pyx_L20_error:; __Pyx_XDECREF(__pyx_10genexpr169__pyx_v_line); __pyx_10genexpr169__pyx_v_line = 0; goto __pyx_L12_error; __pyx_L24_exit_scope:; } /* exit inner scope */ __pyx_t_2 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12806, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); 
__pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_accessPolicyWindow, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12806, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12806, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 12806, __pyx_L12_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12806, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":12805 * except ValueError: * str_list += ['pad : '] * try: # <<<<<<<<<<<<<< * str_list += ['accessPolicyWindow :\n' + '\n'.join([' ' + line for line in str(self.accessPolicyWindow).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L17_try_end; __pyx_L12_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12807 * try: * str_list += ['accessPolicyWindow :\n' + '\n'.join([' ' + line for line in str(self.accessPolicyWindow).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['accessPolicyWindow : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 12807, __pyx_L14_except_error) __Pyx_XGOTREF(__pyx_t_8); 
__Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":12808 * str_list += ['accessPolicyWindow :\n' + '\n'.join([' ' + line for line in str(self.accessPolicyWindow).splitlines()])] * except ValueError: * str_list += ['accessPolicyWindow : '] # <<<<<<<<<<<<<< * try: * str_list += ['cooperative : ' + str(self.cooperative)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12808, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_accessPolicyWindow_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_accessPolicyWindow_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_accessPolicyWindow_ValueError) != (0)) __PYX_ERR(0, 12808, __pyx_L14_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12808, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L13_exception_handled; } goto __pyx_L14_except_error; /* "cuda/bindings/runtime.pyx":12805 * except ValueError: * str_list += ['pad : '] * try: # <<<<<<<<<<<<<< * str_list += ['accessPolicyWindow :\n' + '\n'.join([' ' + line for line in str(self.accessPolicyWindow).splitlines()])] * except ValueError: */ __pyx_L14_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L13_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L17_try_end:; } /* "cuda/bindings/runtime.pyx":12809 * except ValueError: * str_list += ['accessPolicyWindow : '] * try: # <<<<<<<<<<<<<< 
* str_list += ['cooperative : ' + str(self.cooperative)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12810 * str_list += ['accessPolicyWindow : '] * try: * str_list += ['cooperative : ' + str(self.cooperative)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['cooperative : '] */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_cooperative_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12810, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12810, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_cooperative, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12810, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12810, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12810, __pyx_L27_error); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12810, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_6)); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12809 * except ValueError: * str_list += ['accessPolicyWindow : '] * try: # <<<<<<<<<<<<<< * str_list += ['cooperative : ' + str(self.cooperative)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L32_try_end; __pyx_L27_error:; 
__Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12811 * try: * str_list += ['cooperative : ' + str(self.cooperative)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['cooperative : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_2, &__pyx_t_8) < 0) __PYX_ERR(0, 12811, __pyx_L29_except_error) __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":12812 * str_list += ['cooperative : ' + str(self.cooperative)] * except ValueError: * str_list += ['cooperative : '] # <<<<<<<<<<<<<< * try: * str_list += ['syncPolicy : ' + str(self.syncPolicy)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12812, __pyx_L29_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_cooperative_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_cooperative_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_cooperative_ValueError) != (0)) __PYX_ERR(0, 12812, __pyx_L29_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12812, __pyx_L29_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L28_exception_handled; } goto __pyx_L29_except_error; /* "cuda/bindings/runtime.pyx":12809 * except ValueError: * str_list += ['accessPolicyWindow : '] * try: # 
<<<<<<<<<<<<<< * str_list += ['cooperative : ' + str(self.cooperative)] * except ValueError: */ __pyx_L29_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L28_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L32_try_end:; } /* "cuda/bindings/runtime.pyx":12813 * except ValueError: * str_list += ['cooperative : '] * try: # <<<<<<<<<<<<<< * str_list += ['syncPolicy : ' + str(self.syncPolicy)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":12814 * str_list += ['cooperative : '] * try: * str_list += ['syncPolicy : ' + str(self.syncPolicy)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['syncPolicy : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_syncPolicy_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12814, __pyx_L35_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12814, __pyx_L35_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_syncPolicy, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12814, __pyx_L35_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12814, __pyx_L35_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 12814, __pyx_L35_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if 
(unlikely(!__pyx_t_8)) __PYX_ERR(0, 12814, __pyx_L35_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":12813 * except ValueError: * str_list += ['cooperative : '] * try: # <<<<<<<<<<<<<< * str_list += ['syncPolicy : ' + str(self.syncPolicy)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L40_try_end; __pyx_L35_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12815 * try: * str_list += ['syncPolicy : ' + str(self.syncPolicy)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['syncPolicy : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 12815, __pyx_L37_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":12816 * str_list += ['syncPolicy : ' + str(self.syncPolicy)] * except ValueError: * str_list += ['syncPolicy : '] # <<<<<<<<<<<<<< * try: * str_list += ['clusterDim :\n' + '\n'.join([' ' + line for line in str(self.clusterDim).splitlines()])] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12816, __pyx_L37_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_syncPolicy_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_syncPolicy_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_syncPolicy_ValueError) != (0)) __PYX_ERR(0, 
12816, __pyx_L37_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12816, __pyx_L37_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L36_exception_handled; } goto __pyx_L37_except_error; /* "cuda/bindings/runtime.pyx":12813 * except ValueError: * str_list += ['cooperative : '] * try: # <<<<<<<<<<<<<< * str_list += ['syncPolicy : ' + str(self.syncPolicy)] * except ValueError: */ __pyx_L37_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L36_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L40_try_end:; } /* "cuda/bindings/runtime.pyx":12817 * except ValueError: * str_list += ['syncPolicy : '] * try: # <<<<<<<<<<<<<< * str_list += ['clusterDim :\n' + '\n'.join([' ' + line for line in str(self.clusterDim).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12818 * str_list += ['syncPolicy : '] * try: * str_list += ['clusterDim :\n' + '\n'.join([' ' + line for line in str(self.clusterDim).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['clusterDim : '] */ { /* enter inner scope */ __pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12818, __pyx_L51_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), 
__pyx_mstate_global->__pyx_n_u_clusterDim_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12818, __pyx_L51_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_8 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12818, __pyx_L51_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyUnicode_Splitlines(((PyObject*)__pyx_t_8), 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12818, __pyx_L51_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __pyx_t_2; __Pyx_INCREF(__pyx_t_8); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_8); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12818, __pyx_L51_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_2 = __Pyx_PyList_GetItemRef(__pyx_t_8, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12818, __pyx_L51_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_10genexpr170__pyx_v_line, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr170__pyx_v_line); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12818, __pyx_L51_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_6, (PyObject*)__pyx_t_2))) __PYX_ERR(0, 12818, __pyx_L51_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_10genexpr170__pyx_v_line); __pyx_10genexpr170__pyx_v_line = 0; goto __pyx_L55_exit_scope; __pyx_L51_error:; __Pyx_XDECREF(__pyx_10genexpr170__pyx_v_line); __pyx_10genexpr170__pyx_v_line = 0; goto __pyx_L43_error; __pyx_L55_exit_scope:; } /* exit inner scope */ __pyx_t_8 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12818, __pyx_L43_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_clusterDim, 
__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12818, __pyx_L43_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyList_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12818, __pyx_L43_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12818, __pyx_L43_error); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12818, __pyx_L43_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_6)); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12817 * except ValueError: * str_list += ['syncPolicy : '] * try: # <<<<<<<<<<<<<< * str_list += ['clusterDim :\n' + '\n'.join([' ' + line for line in str(self.clusterDim).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L48_try_end; __pyx_L43_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12819 * try: * str_list += ['clusterDim :\n' + '\n'.join([' ' + line for line in str(self.clusterDim).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['clusterDim : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_8, &__pyx_t_2) < 0) __PYX_ERR(0, 12819, __pyx_L45_except_error) __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":12820 * str_list += ['clusterDim :\n' + '\n'.join([' ' + line for 
line in str(self.clusterDim).splitlines()])] * except ValueError: * str_list += ['clusterDim : '] # <<<<<<<<<<<<<< * try: * str_list += ['clusterSchedulingPolicyPreference : ' + str(self.clusterSchedulingPolicyPreference)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12820, __pyx_L45_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_clusterDim_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_clusterDim_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_clusterDim_ValueError) != (0)) __PYX_ERR(0, 12820, __pyx_L45_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12820, __pyx_L45_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L44_exception_handled; } goto __pyx_L45_except_error; /* "cuda/bindings/runtime.pyx":12817 * except ValueError: * str_list += ['syncPolicy : '] * try: # <<<<<<<<<<<<<< * str_list += ['clusterDim :\n' + '\n'.join([' ' + line for line in str(self.clusterDim).splitlines()])] * except ValueError: */ __pyx_L45_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L44_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L48_try_end:; } /* "cuda/bindings/runtime.pyx":12821 * except ValueError: * str_list += ['clusterDim : '] * try: # <<<<<<<<<<<<<< * str_list += ['clusterSchedulingPolicyPreference : ' + str(self.clusterSchedulingPolicyPreference)] * except ValueError: */ { __Pyx_PyThreadState_declare 
__Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":12822 * str_list += ['clusterDim : '] * try: * str_list += ['clusterSchedulingPolicyPreference : ' + str(self.clusterSchedulingPolicyPreference)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['clusterSchedulingPolicyPreference : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_clusterSchedulingPolicyPreferenc_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12822, __pyx_L58_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_8 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12822, __pyx_L58_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_clusterSchedulingPolicyPreferenc, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12822, __pyx_L58_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyList_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12822, __pyx_L58_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 12822, __pyx_L58_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12822, __pyx_L58_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12821 * except ValueError: * str_list += ['clusterDim : '] * try: # <<<<<<<<<<<<<< * str_list += ['clusterSchedulingPolicyPreference : ' + str(self.clusterSchedulingPolicyPreference)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L63_try_end; 
__pyx_L58_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12823 * try: * str_list += ['clusterSchedulingPolicyPreference : ' + str(self.clusterSchedulingPolicyPreference)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['clusterSchedulingPolicyPreference : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(0, 12823, __pyx_L60_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":12824 * str_list += ['clusterSchedulingPolicyPreference : ' + str(self.clusterSchedulingPolicyPreference)] * except ValueError: * str_list += ['clusterSchedulingPolicyPreference : '] # <<<<<<<<<<<<<< * try: * str_list += ['programmaticStreamSerializationAllowed : ' + str(self.programmaticStreamSerializationAllowed)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12824, __pyx_L60_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_clusterSchedulingPolicyPreferenc_3); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_clusterSchedulingPolicyPreferenc_3); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_clusterSchedulingPolicyPreferenc_3) != (0)) __PYX_ERR(0, 12824, __pyx_L60_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12824, __pyx_L60_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; 
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L59_exception_handled; } goto __pyx_L60_except_error; /* "cuda/bindings/runtime.pyx":12821 * except ValueError: * str_list += ['clusterDim : '] * try: # <<<<<<<<<<<<<< * str_list += ['clusterSchedulingPolicyPreference : ' + str(self.clusterSchedulingPolicyPreference)] * except ValueError: */ __pyx_L60_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L59_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L63_try_end:; } /* "cuda/bindings/runtime.pyx":12825 * except ValueError: * str_list += ['clusterSchedulingPolicyPreference : '] * try: # <<<<<<<<<<<<<< * str_list += ['programmaticStreamSerializationAllowed : ' + str(self.programmaticStreamSerializationAllowed)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12826 * str_list += ['clusterSchedulingPolicyPreference : '] * try: * str_list += ['programmaticStreamSerializationAllowed : ' + str(self.programmaticStreamSerializationAllowed)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['programmaticStreamSerializationAllowed : '] */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_programmaticStreamSerializationA_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12826, __pyx_L66_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12826, __pyx_L66_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = 
__Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_programmaticStreamSerializationA, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12826, __pyx_L66_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyList_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12826, __pyx_L66_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12826, __pyx_L66_error); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12826, __pyx_L66_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_6)); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12825 * except ValueError: * str_list += ['clusterSchedulingPolicyPreference : '] * try: # <<<<<<<<<<<<<< * str_list += ['programmaticStreamSerializationAllowed : ' + str(self.programmaticStreamSerializationAllowed)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L71_try_end; __pyx_L66_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12827 * try: * str_list += ['programmaticStreamSerializationAllowed : ' + str(self.programmaticStreamSerializationAllowed)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['programmaticStreamSerializationAllowed : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_8, &__pyx_t_2) < 0) __PYX_ERR(0, 12827, __pyx_L68_except_error) __Pyx_XGOTREF(__pyx_t_6); 
__Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":12828 * str_list += ['programmaticStreamSerializationAllowed : ' + str(self.programmaticStreamSerializationAllowed)] * except ValueError: * str_list += ['programmaticStreamSerializationAllowed : '] # <<<<<<<<<<<<<< * try: * str_list += ['programmaticEvent :\n' + '\n'.join([' ' + line for line in str(self.programmaticEvent).splitlines()])] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12828, __pyx_L68_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_programmaticStreamSerializationA_3); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_programmaticStreamSerializationA_3); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_programmaticStreamSerializationA_3) != (0)) __PYX_ERR(0, 12828, __pyx_L68_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12828, __pyx_L68_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L67_exception_handled; } goto __pyx_L68_except_error; /* "cuda/bindings/runtime.pyx":12825 * except ValueError: * str_list += ['clusterSchedulingPolicyPreference : '] * try: # <<<<<<<<<<<<<< * str_list += ['programmaticStreamSerializationAllowed : ' + str(self.programmaticStreamSerializationAllowed)] * except ValueError: */ __pyx_L68_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L67_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L71_try_end:; } /* 
"cuda/bindings/runtime.pyx":12829 * except ValueError: * str_list += ['programmaticStreamSerializationAllowed : '] * try: # <<<<<<<<<<<<<< * str_list += ['programmaticEvent :\n' + '\n'.join([' ' + line for line in str(self.programmaticEvent).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":12830 * str_list += ['programmaticStreamSerializationAllowed : '] * try: * str_list += ['programmaticEvent :\n' + '\n'.join([' ' + line for line in str(self.programmaticEvent).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['programmaticEvent : '] */ { /* enter inner scope */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12830, __pyx_L82_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_programmaticEvent_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12830, __pyx_L82_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12830, __pyx_L82_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyUnicode_Splitlines(((PyObject*)__pyx_t_6), 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12830, __pyx_L82_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __pyx_t_8; __Pyx_INCREF(__pyx_t_6); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_6); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12830, __pyx_L82_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_8 = __Pyx_PyList_GetItemRef(__pyx_t_6, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12830, __pyx_L82_error) __Pyx_GOTREF(__pyx_t_8); 
__Pyx_XDECREF_SET(__pyx_10genexpr171__pyx_v_line, __pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr171__pyx_v_line); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12830, __pyx_L82_error) __Pyx_GOTREF(__pyx_t_8); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_8))) __PYX_ERR(0, 12830, __pyx_L82_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_10genexpr171__pyx_v_line); __pyx_10genexpr171__pyx_v_line = 0; goto __pyx_L86_exit_scope; __pyx_L82_error:; __Pyx_XDECREF(__pyx_10genexpr171__pyx_v_line); __pyx_10genexpr171__pyx_v_line = 0; goto __pyx_L74_error; __pyx_L86_exit_scope:; } /* exit inner scope */ __pyx_t_6 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12830, __pyx_L74_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_programmaticEvent, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12830, __pyx_L74_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12830, __pyx_L74_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 12830, __pyx_L74_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12830, __pyx_L74_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12829 * except ValueError: * str_list += ['programmaticStreamSerializationAllowed : '] * try: # <<<<<<<<<<<<<< * str_list += ['programmaticEvent :\n' + '\n'.join([' ' + line for line in str(self.programmaticEvent).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 
= 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L79_try_end; __pyx_L74_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12831 * try: * str_list += ['programmaticEvent :\n' + '\n'.join([' ' + line for line in str(self.programmaticEvent).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['programmaticEvent : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 12831, __pyx_L76_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":12832 * str_list += ['programmaticEvent :\n' + '\n'.join([' ' + line for line in str(self.programmaticEvent).splitlines()])] * except ValueError: * str_list += ['programmaticEvent : '] # <<<<<<<<<<<<<< * try: * str_list += ['priority : ' + str(self.priority)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12832, __pyx_L76_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_programmaticEvent_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_programmaticEvent_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_programmaticEvent_ValueError) != (0)) __PYX_ERR(0, 12832, __pyx_L76_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12832, __pyx_L76_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; 
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L75_exception_handled; } goto __pyx_L76_except_error; /* "cuda/bindings/runtime.pyx":12829 * except ValueError: * str_list += ['programmaticStreamSerializationAllowed : '] * try: # <<<<<<<<<<<<<< * str_list += ['programmaticEvent :\n' + '\n'.join([' ' + line for line in str(self.programmaticEvent).splitlines()])] * except ValueError: */ __pyx_L76_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L75_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L79_try_end:; } /* "cuda/bindings/runtime.pyx":12833 * except ValueError: * str_list += ['programmaticEvent : '] * try: # <<<<<<<<<<<<<< * str_list += ['priority : ' + str(self.priority)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12834 * str_list += ['programmaticEvent : '] * try: * str_list += ['priority : ' + str(self.priority)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['priority : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_priority_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12834, __pyx_L89_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12834, __pyx_L89_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_priority, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12834, __pyx_L89_error) __Pyx_GOTREF(__pyx_t_8); 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12834, __pyx_L89_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 12834, __pyx_L89_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12834, __pyx_L89_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":12833 * except ValueError: * str_list += ['programmaticEvent : '] * try: # <<<<<<<<<<<<<< * str_list += ['priority : ' + str(self.priority)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L94_try_end; __pyx_L89_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12835 * try: * str_list += ['priority : ' + str(self.priority)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['priority : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 12835, __pyx_L91_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":12836 * str_list += ['priority : ' + str(self.priority)] * except ValueError: * str_list += ['priority : '] # <<<<<<<<<<<<<< * try: * str_list += ['memSyncDomainMap :\n' + '\n'.join([' ' + line for line in str(self.memSyncDomainMap).splitlines()])] */ __pyx_t_9 = 
PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12836, __pyx_L91_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_priority_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_priority_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_priority_ValueError) != (0)) __PYX_ERR(0, 12836, __pyx_L91_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12836, __pyx_L91_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L90_exception_handled; } goto __pyx_L91_except_error; /* "cuda/bindings/runtime.pyx":12833 * except ValueError: * str_list += ['programmaticEvent : '] * try: # <<<<<<<<<<<<<< * str_list += ['priority : ' + str(self.priority)] * except ValueError: */ __pyx_L91_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L90_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L94_try_end:; } /* "cuda/bindings/runtime.pyx":12837 * except ValueError: * str_list += ['priority : '] * try: # <<<<<<<<<<<<<< * str_list += ['memSyncDomainMap :\n' + '\n'.join([' ' + line for line in str(self.memSyncDomainMap).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":12838 * str_list += ['priority : '] * try: * str_list += ['memSyncDomainMap :\n' + 
'\n'.join([' ' + line for line in str(self.memSyncDomainMap).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['memSyncDomainMap : '] */ { /* enter inner scope */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12838, __pyx_L105_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_memSyncDomainMap_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12838, __pyx_L105_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12838, __pyx_L105_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyUnicode_Splitlines(((PyObject*)__pyx_t_8), 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12838, __pyx_L105_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __pyx_t_6; __Pyx_INCREF(__pyx_t_8); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_8); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12838, __pyx_L105_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_8, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12838, __pyx_L105_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_10genexpr172__pyx_v_line, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr172__pyx_v_line); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12838, __pyx_L105_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 12838, __pyx_L105_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_10genexpr172__pyx_v_line); __pyx_10genexpr172__pyx_v_line = 0; goto __pyx_L109_exit_scope; __pyx_L105_error:; __Pyx_XDECREF(__pyx_10genexpr172__pyx_v_line); 
__pyx_10genexpr172__pyx_v_line = 0; goto __pyx_L97_error; __pyx_L109_exit_scope:; } /* exit inner scope */ __pyx_t_8 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12838, __pyx_L97_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_memSyncDomainMap, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12838, __pyx_L97_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyList_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12838, __pyx_L97_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 12838, __pyx_L97_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12838, __pyx_L97_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12837 * except ValueError: * str_list += ['priority : '] * try: # <<<<<<<<<<<<<< * str_list += ['memSyncDomainMap :\n' + '\n'.join([' ' + line for line in str(self.memSyncDomainMap).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L102_try_end; __pyx_L97_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12839 * try: * str_list += ['memSyncDomainMap :\n' + '\n'.join([' ' + line for line in str(self.memSyncDomainMap).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['memSyncDomainMap : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if 
(__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(0, 12839, __pyx_L99_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":12840 * str_list += ['memSyncDomainMap :\n' + '\n'.join([' ' + line for line in str(self.memSyncDomainMap).splitlines()])] * except ValueError: * str_list += ['memSyncDomainMap : '] # <<<<<<<<<<<<<< * try: * str_list += ['memSyncDomain : ' + str(self.memSyncDomain)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12840, __pyx_L99_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_memSyncDomainMap_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_memSyncDomainMap_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_memSyncDomainMap_ValueError) != (0)) __PYX_ERR(0, 12840, __pyx_L99_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12840, __pyx_L99_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L98_exception_handled; } goto __pyx_L99_except_error; /* "cuda/bindings/runtime.pyx":12837 * except ValueError: * str_list += ['priority : '] * try: # <<<<<<<<<<<<<< * str_list += ['memSyncDomainMap :\n' + '\n'.join([' ' + line for line in str(self.memSyncDomainMap).splitlines()])] * except ValueError: */ __pyx_L99_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L98_exception_handled:; 
__Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L102_try_end:; } /* "cuda/bindings/runtime.pyx":12841 * except ValueError: * str_list += ['memSyncDomainMap : '] * try: # <<<<<<<<<<<<<< * str_list += ['memSyncDomain : ' + str(self.memSyncDomain)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12842 * str_list += ['memSyncDomainMap : '] * try: * str_list += ['memSyncDomain : ' + str(self.memSyncDomain)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['memSyncDomain : '] */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_memSyncDomain_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12842, __pyx_L112_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12842, __pyx_L112_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_memSyncDomain, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12842, __pyx_L112_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyList_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12842, __pyx_L112_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12842, __pyx_L112_error); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12842, __pyx_L112_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_6)); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12841 * except ValueError: * str_list += 
['memSyncDomainMap : '] * try: # <<<<<<<<<<<<<< * str_list += ['memSyncDomain : ' + str(self.memSyncDomain)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L117_try_end; __pyx_L112_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12843 * try: * str_list += ['memSyncDomain : ' + str(self.memSyncDomain)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['memSyncDomain : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_8, &__pyx_t_2) < 0) __PYX_ERR(0, 12843, __pyx_L114_except_error) __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":12844 * str_list += ['memSyncDomain : ' + str(self.memSyncDomain)] * except ValueError: * str_list += ['memSyncDomain : '] # <<<<<<<<<<<<<< * try: * str_list += ['preferredClusterDim :\n' + '\n'.join([' ' + line for line in str(self.preferredClusterDim).splitlines()])] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12844, __pyx_L114_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_memSyncDomain_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_memSyncDomain_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_memSyncDomain_ValueError) != (0)) __PYX_ERR(0, 12844, __pyx_L114_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12844, __pyx_L114_except_error) __Pyx_GOTREF(__pyx_t_10); 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L113_exception_handled; } goto __pyx_L114_except_error; /* "cuda/bindings/runtime.pyx":12841 * except ValueError: * str_list += ['memSyncDomainMap : '] * try: # <<<<<<<<<<<<<< * str_list += ['memSyncDomain : ' + str(self.memSyncDomain)] * except ValueError: */ __pyx_L114_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L113_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L117_try_end:; } /* "cuda/bindings/runtime.pyx":12845 * except ValueError: * str_list += ['memSyncDomain : '] * try: # <<<<<<<<<<<<<< * str_list += ['preferredClusterDim :\n' + '\n'.join([' ' + line for line in str(self.preferredClusterDim).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":12846 * str_list += ['memSyncDomain : '] * try: * str_list += ['preferredClusterDim :\n' + '\n'.join([' ' + line for line in str(self.preferredClusterDim).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['preferredClusterDim : '] */ { /* enter inner scope */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12846, __pyx_L128_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_preferredClusterDim_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12846, __pyx_L128_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = 
__Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12846, __pyx_L128_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyUnicode_Splitlines(((PyObject*)__pyx_t_6), 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12846, __pyx_L128_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __pyx_t_8; __Pyx_INCREF(__pyx_t_6); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_6); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12846, __pyx_L128_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_8 = __Pyx_PyList_GetItemRef(__pyx_t_6, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12846, __pyx_L128_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_XDECREF_SET(__pyx_10genexpr173__pyx_v_line, __pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr173__pyx_v_line); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12846, __pyx_L128_error) __Pyx_GOTREF(__pyx_t_8); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_8))) __PYX_ERR(0, 12846, __pyx_L128_error) __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_10genexpr173__pyx_v_line); __pyx_10genexpr173__pyx_v_line = 0; goto __pyx_L132_exit_scope; __pyx_L128_error:; __Pyx_XDECREF(__pyx_10genexpr173__pyx_v_line); __pyx_10genexpr173__pyx_v_line = 0; goto __pyx_L120_error; __pyx_L132_exit_scope:; } /* exit inner scope */ __pyx_t_6 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12846, __pyx_L120_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_preferredClusterDim, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12846, __pyx_L120_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 
= 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12846, __pyx_L120_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 12846, __pyx_L120_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12846, __pyx_L120_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12845 * except ValueError: * str_list += ['memSyncDomain : '] * try: # <<<<<<<<<<<<<< * str_list += ['preferredClusterDim :\n' + '\n'.join([' ' + line for line in str(self.preferredClusterDim).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L125_try_end; __pyx_L120_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12847 * try: * str_list += ['preferredClusterDim :\n' + '\n'.join([' ' + line for line in str(self.preferredClusterDim).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['preferredClusterDim : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 12847, __pyx_L122_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":12848 * str_list += ['preferredClusterDim :\n' + '\n'.join([' ' + line for line in str(self.preferredClusterDim).splitlines()])] * except ValueError: 
* str_list += ['preferredClusterDim : '] # <<<<<<<<<<<<<< * try: * str_list += ['launchCompletionEvent :\n' + '\n'.join([' ' + line for line in str(self.launchCompletionEvent).splitlines()])] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12848, __pyx_L122_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_preferredClusterDim_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_preferredClusterDim_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_preferredClusterDim_ValueError) != (0)) __PYX_ERR(0, 12848, __pyx_L122_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12848, __pyx_L122_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L121_exception_handled; } goto __pyx_L122_except_error; /* "cuda/bindings/runtime.pyx":12845 * except ValueError: * str_list += ['memSyncDomain : '] * try: # <<<<<<<<<<<<<< * str_list += ['preferredClusterDim :\n' + '\n'.join([' ' + line for line in str(self.preferredClusterDim).splitlines()])] * except ValueError: */ __pyx_L122_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L121_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L125_try_end:; } /* "cuda/bindings/runtime.pyx":12849 * except ValueError: * str_list += ['preferredClusterDim : '] * try: # <<<<<<<<<<<<<< * str_list += ['launchCompletionEvent :\n' + '\n'.join([' ' + line for line in str(self.launchCompletionEvent).splitlines()])] * except 
ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12850 * str_list += ['preferredClusterDim : '] * try: * str_list += ['launchCompletionEvent :\n' + '\n'.join([' ' + line for line in str(self.launchCompletionEvent).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['launchCompletionEvent : '] */ { /* enter inner scope */ __pyx_t_8 = PyList_New(0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12850, __pyx_L143_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_launchCompletionEvent_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12850, __pyx_L143_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12850, __pyx_L143_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyUnicode_Splitlines(((PyObject*)__pyx_t_2), 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12850, __pyx_L143_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_t_6; __Pyx_INCREF(__pyx_t_2); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12850, __pyx_L143_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_2, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12850, __pyx_L143_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_10genexpr174__pyx_v_line, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr174__pyx_v_line); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12850, __pyx_L143_error) __Pyx_GOTREF(__pyx_t_6); if 
(unlikely(__Pyx_ListComp_Append(__pyx_t_8, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 12850, __pyx_L143_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_10genexpr174__pyx_v_line); __pyx_10genexpr174__pyx_v_line = 0; goto __pyx_L147_exit_scope; __pyx_L143_error:; __Pyx_XDECREF(__pyx_10genexpr174__pyx_v_line); __pyx_10genexpr174__pyx_v_line = 0; goto __pyx_L135_error; __pyx_L147_exit_scope:; } /* exit inner scope */ __pyx_t_2 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12850, __pyx_L135_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_launchCompletionEvent, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12850, __pyx_L135_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12850, __pyx_L135_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 12850, __pyx_L135_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12850, __pyx_L135_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":12849 * except ValueError: * str_list += ['preferredClusterDim : '] * try: # <<<<<<<<<<<<<< * str_list += ['launchCompletionEvent :\n' + '\n'.join([' ' + line for line in str(self.launchCompletionEvent).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L140_try_end; __pyx_L135_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; 
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12851 * try: * str_list += ['launchCompletionEvent :\n' + '\n'.join([' ' + line for line in str(self.launchCompletionEvent).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['launchCompletionEvent : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 12851, __pyx_L137_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":12852 * str_list += ['launchCompletionEvent :\n' + '\n'.join([' ' + line for line in str(self.launchCompletionEvent).splitlines()])] * except ValueError: * str_list += ['launchCompletionEvent : '] # <<<<<<<<<<<<<< * try: * str_list += ['deviceUpdatableKernelNode :\n' + '\n'.join([' ' + line for line in str(self.deviceUpdatableKernelNode).splitlines()])] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12852, __pyx_L137_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_launchCompletionEvent_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_launchCompletionEvent_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_launchCompletionEvent_ValueError) != (0)) __PYX_ERR(0, 12852, __pyx_L137_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12852, __pyx_L137_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto 
__pyx_L136_exception_handled; } goto __pyx_L137_except_error; /* "cuda/bindings/runtime.pyx":12849 * except ValueError: * str_list += ['preferredClusterDim : '] * try: # <<<<<<<<<<<<<< * str_list += ['launchCompletionEvent :\n' + '\n'.join([' ' + line for line in str(self.launchCompletionEvent).splitlines()])] * except ValueError: */ __pyx_L137_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L136_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L140_try_end:; } /* "cuda/bindings/runtime.pyx":12853 * except ValueError: * str_list += ['launchCompletionEvent : '] * try: # <<<<<<<<<<<<<< * str_list += ['deviceUpdatableKernelNode :\n' + '\n'.join([' ' + line for line in str(self.deviceUpdatableKernelNode).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":12854 * str_list += ['launchCompletionEvent : '] * try: * str_list += ['deviceUpdatableKernelNode :\n' + '\n'.join([' ' + line for line in str(self.deviceUpdatableKernelNode).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['deviceUpdatableKernelNode : '] */ { /* enter inner scope */ __pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12854, __pyx_L158_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_deviceUpdatableKernelNode_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12854, __pyx_L158_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_8 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12854, __pyx_L158_error) __Pyx_GOTREF(__pyx_t_8); 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyUnicode_Splitlines(((PyObject*)__pyx_t_8), 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12854, __pyx_L158_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __pyx_t_2; __Pyx_INCREF(__pyx_t_8); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_8); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12854, __pyx_L158_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_2 = __Pyx_PyList_GetItemRef(__pyx_t_8, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12854, __pyx_L158_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_10genexpr175__pyx_v_line, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr175__pyx_v_line); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12854, __pyx_L158_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_6, (PyObject*)__pyx_t_2))) __PYX_ERR(0, 12854, __pyx_L158_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_10genexpr175__pyx_v_line); __pyx_10genexpr175__pyx_v_line = 0; goto __pyx_L162_exit_scope; __pyx_L158_error:; __Pyx_XDECREF(__pyx_10genexpr175__pyx_v_line); __pyx_10genexpr175__pyx_v_line = 0; goto __pyx_L150_error; __pyx_L162_exit_scope:; } /* exit inner scope */ __pyx_t_8 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12854, __pyx_L150_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_deviceUpdatableKernelNode, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12854, __pyx_L150_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyList_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12854, __pyx_L150_error) 
__Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 12854, __pyx_L150_error); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12854, __pyx_L150_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_6)); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12853 * except ValueError: * str_list += ['launchCompletionEvent : '] * try: # <<<<<<<<<<<<<< * str_list += ['deviceUpdatableKernelNode :\n' + '\n'.join([' ' + line for line in str(self.deviceUpdatableKernelNode).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L155_try_end; __pyx_L150_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12855 * try: * str_list += ['deviceUpdatableKernelNode :\n' + '\n'.join([' ' + line for line in str(self.deviceUpdatableKernelNode).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['deviceUpdatableKernelNode : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_8, &__pyx_t_2) < 0) __PYX_ERR(0, 12855, __pyx_L152_except_error) __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":12856 * str_list += ['deviceUpdatableKernelNode :\n' + '\n'.join([' ' + line for line in str(self.deviceUpdatableKernelNode).splitlines()])] * except ValueError: * str_list += ['deviceUpdatableKernelNode : '] 
# <<<<<<<<<<<<<< * try: * str_list += ['sharedMemCarveout : ' + str(self.sharedMemCarveout)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12856, __pyx_L152_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_deviceUpdatableKernelNode_ValueE); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_deviceUpdatableKernelNode_ValueE); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_deviceUpdatableKernelNode_ValueE) != (0)) __PYX_ERR(0, 12856, __pyx_L152_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12856, __pyx_L152_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L151_exception_handled; } goto __pyx_L152_except_error; /* "cuda/bindings/runtime.pyx":12853 * except ValueError: * str_list += ['launchCompletionEvent : '] * try: # <<<<<<<<<<<<<< * str_list += ['deviceUpdatableKernelNode :\n' + '\n'.join([' ' + line for line in str(self.deviceUpdatableKernelNode).splitlines()])] * except ValueError: */ __pyx_L152_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L151_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L155_try_end:; } /* "cuda/bindings/runtime.pyx":12857 * except ValueError: * str_list += ['deviceUpdatableKernelNode : '] * try: # <<<<<<<<<<<<<< * str_list += ['sharedMemCarveout : ' + str(self.sharedMemCarveout)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, 
&__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":12858 * str_list += ['deviceUpdatableKernelNode : '] * try: * str_list += ['sharedMemCarveout : ' + str(self.sharedMemCarveout)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['sharedMemCarveout : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_sharedMemCarveout_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12858, __pyx_L165_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_8 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12858, __pyx_L165_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_sharedMemCarveout, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12858, __pyx_L165_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = PyList_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 12858, __pyx_L165_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 12858, __pyx_L165_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12858, __pyx_L165_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":12857 * except ValueError: * str_list += ['deviceUpdatableKernelNode : '] * try: # <<<<<<<<<<<<<< * str_list += ['sharedMemCarveout : ' + str(self.sharedMemCarveout)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L170_try_end; __pyx_L165_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; 
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":12859 * try: * str_list += ['sharedMemCarveout : ' + str(self.sharedMemCarveout)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['sharedMemCarveout : '] * return '\n'.join(str_list) */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(0, 12859, __pyx_L167_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":12860 * str_list += ['sharedMemCarveout : ' + str(self.sharedMemCarveout)] * except ValueError: * str_list += ['sharedMemCarveout : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12860, __pyx_L167_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_sharedMemCarveout_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_sharedMemCarveout_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_sharedMemCarveout_ValueError) != (0)) __PYX_ERR(0, 12860, __pyx_L167_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 12860, __pyx_L167_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L166_exception_handled; } goto __pyx_L167_except_error; /* "cuda/bindings/runtime.pyx":12857 * except ValueError: * str_list += ['deviceUpdatableKernelNode : '] * try: # <<<<<<<<<<<<<< * str_list += ['sharedMemCarveout : ' + 
str(self.sharedMemCarveout)] * except ValueError: */ __pyx_L167_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L166_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L170_try_end:; } /* "cuda/bindings/runtime.pyx":12861 * except ValueError: * str_list += ['sharedMemCarveout : '] * return '\n'.join(str_list) # <<<<<<<<<<<<<< * else: * return '' */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12861, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12799 * return self._pvt_ptr * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ } /* "cuda/bindings/runtime.pyx":12863 * return '\n'.join(str_list) * else: * return '' # <<<<<<<<<<<<<< * @property * def pad(self): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3); __pyx_r = __pyx_mstate_global->__pyx_kp_u__3; goto __pyx_L0; } /* "cuda/bindings/runtime.pyx":12798 * def getPtr(self): * return self._pvt_ptr * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_str_list); __Pyx_XDECREF(__pyx_10genexpr169__pyx_v_line); __Pyx_XDECREF(__pyx_10genexpr170__pyx_v_line); __Pyx_XDECREF(__pyx_10genexpr171__pyx_v_line); __Pyx_XDECREF(__pyx_10genexpr172__pyx_v_line); 
__Pyx_XDECREF(__pyx_10genexpr173__pyx_v_line); __Pyx_XDECREF(__pyx_10genexpr174__pyx_v_line); __Pyx_XDECREF(__pyx_10genexpr175__pyx_v_line); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12864 * else: * return '' * @property # <<<<<<<<<<<<<< * def pad(self): * return PyBytes_FromStringAndSize(self._pvt_ptr[0].pad, 64) */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3pad_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3pad_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3pad___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3pad___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12866 * @property * def pad(self): * return PyBytes_FromStringAndSize(self._pvt_ptr[0].pad, 64) # <<<<<<<<<<<<<< * @pad.setter * def pad(self, pad): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyBytes_FromStringAndSize((__pyx_v_self->_pvt_ptr[0]).pad, 64); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12866, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12864 * else: * return '' * @property # <<<<<<<<<<<<<< * def 
pad(self): * return PyBytes_FromStringAndSize(self._pvt_ptr[0].pad, 64) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.pad.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12867 * def pad(self): * return PyBytes_FromStringAndSize(self._pvt_ptr[0].pad, 64) * @pad.setter # <<<<<<<<<<<<<< * def pad(self, pad): * if len(pad) != 64: */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3pad_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_pad); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3pad_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_pad) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3pad_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((PyObject *)__pyx_v_pad)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_3pad_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, PyObject *__pyx_v_pad) { PyObject *__pyx_v_i = NULL; PyObject *__pyx_v_b = NULL; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; size_t __pyx_t_8; PyObject *(*__pyx_t_9)(PyObject *); int __pyx_t_10; char __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12869 * @pad.setter * def pad(self, pad): * if len(pad) != 64: # <<<<<<<<<<<<<< * raise ValueError("pad length must be 64, is " + str(len(pad))) * if CHAR_MIN == 0: */ __pyx_t_1 = PyObject_Length(__pyx_v_pad); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 12869, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 64); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":12870 * def pad(self, pad): * if len(pad) != 64: * raise ValueError("pad length must be 64, is " + str(len(pad))) # <<<<<<<<<<<<<< * if CHAR_MIN == 0: * for i, b in enumerate(pad): */ __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_ValueError); __pyx_t_5 = __pyx_builtin_ValueError; __pyx_t_1 = PyObject_Length(__pyx_v_pad); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 12870, __pyx_L1_error) __pyx_t_6 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 12870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_pad_length_must_be_64_is, __pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_8 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_6}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 12870, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":12869 * @pad.setter * def pad(self, pad): * if 
len(pad) != 64: # <<<<<<<<<<<<<< * raise ValueError("pad length must be 64, is " + str(len(pad))) * if CHAR_MIN == 0: */ } /* "cuda/bindings/runtime.pyx":12871 * if len(pad) != 64: * raise ValueError("pad length must be 64, is " + str(len(pad))) * if CHAR_MIN == 0: # <<<<<<<<<<<<<< * for i, b in enumerate(pad): * if b < 0 and b > -129: */ __pyx_t_2 = (CHAR_MIN == 0); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":12872 * raise ValueError("pad length must be 64, is " + str(len(pad))) * if CHAR_MIN == 0: * for i, b in enumerate(pad): # <<<<<<<<<<<<<< * if b < 0 and b > -129: * b = b + 256 */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_t_3 = __pyx_mstate_global->__pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_pad)) || PyTuple_CheckExact(__pyx_v_pad)) { __pyx_t_5 = __pyx_v_pad; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; __pyx_t_9 = NULL; } else { __pyx_t_1 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_v_pad); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12872, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_5); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12872, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_9)) { if (likely(PyList_CheckExact(__pyx_t_5))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_5); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12872, __pyx_L1_error) #endif if (__pyx_t_1 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_5, __pyx_t_1); ++__pyx_t_1; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_5); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12872, __pyx_L1_error) #endif if (__pyx_t_1 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_6 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1)); #else __pyx_t_6 = __Pyx_PySequence_ITEM(__pyx_t_5, __pyx_t_1); #endif ++__pyx_t_1; } if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12872, __pyx_L1_error) } else { __pyx_t_6 = __pyx_t_9(__pyx_t_5); if (unlikely(!__pyx_t_6)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 12872, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_b, __pyx_t_6); __pyx_t_6 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_3); __pyx_t_6 = __Pyx_PyLong_AddObjC(__pyx_t_3, __pyx_mstate_global->__pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12872, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_6; __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12873 * if CHAR_MIN == 0: * for i, b in enumerate(pad): * if b < 0 and b > -129: # <<<<<<<<<<<<<< * b = b + 256 * self._pvt_ptr[0].pad[i] = b */ __pyx_t_6 = PyObject_RichCompare(__pyx_v_b, __pyx_mstate_global->__pyx_int_0, Py_LT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12873, __pyx_L1_error) __pyx_t_10 = 
__Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_10 < 0))) __PYX_ERR(0, 12873, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__pyx_t_10) { } else { __pyx_t_2 = __pyx_t_10; goto __pyx_L8_bool_binop_done; } __pyx_t_6 = PyObject_RichCompare(__pyx_v_b, __pyx_mstate_global->__pyx_int_neg_129, Py_GT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12873, __pyx_L1_error) __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_10 < 0))) __PYX_ERR(0, 12873, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_2 = __pyx_t_10; __pyx_L8_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":12874 * for i, b in enumerate(pad): * if b < 0 and b > -129: * b = b + 256 # <<<<<<<<<<<<<< * self._pvt_ptr[0].pad[i] = b * else: */ __pyx_t_6 = __Pyx_PyLong_AddObjC(__pyx_v_b, __pyx_mstate_global->__pyx_int_256, 0x100, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12874, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_b, __pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12873 * if CHAR_MIN == 0: * for i, b in enumerate(pad): * if b < 0 and b > -129: # <<<<<<<<<<<<<< * b = b + 256 * self._pvt_ptr[0].pad[i] = b */ } /* "cuda/bindings/runtime.pyx":12875 * if b < 0 and b > -129: * b = b + 256 * self._pvt_ptr[0].pad[i] = b # <<<<<<<<<<<<<< * else: * for i, b in enumerate(pad): */ __pyx_t_11 = __Pyx_PyLong_As_char(__pyx_v_b); if (unlikely((__pyx_t_11 == (char)-1) && PyErr_Occurred())) __PYX_ERR(0, 12875, __pyx_L1_error) __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 12875, __pyx_L1_error) ((__pyx_v_self->_pvt_ptr[0]).pad[__pyx_t_12]) = __pyx_t_11; /* "cuda/bindings/runtime.pyx":12872 * raise ValueError("pad length must be 64, is " + str(len(pad))) * if CHAR_MIN == 0: * for i, b in enumerate(pad): # <<<<<<<<<<<<<< * if b < 0 and b > -129: * b = b + 256 */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":12871 * if len(pad) != 64: * raise ValueError("pad length must be 64, is " + str(len(pad))) * if CHAR_MIN == 0: # <<<<<<<<<<<<<< * for i, b in enumerate(pad): * if b < 0 and b > -129: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":12877 * self._pvt_ptr[0].pad[i] = b * else: * for i, b in enumerate(pad): # <<<<<<<<<<<<<< * if b > 127 and b < 256: * b = b - 256 */ /*else*/ { __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_t_3 = __pyx_mstate_global->__pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_pad)) || PyTuple_CheckExact(__pyx_v_pad)) { __pyx_t_5 = __pyx_v_pad; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; __pyx_t_9 = NULL; } else { __pyx_t_1 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_v_pad); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 12877, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_5); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 12877, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_9)) { if (likely(PyList_CheckExact(__pyx_t_5))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_5); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12877, __pyx_L1_error) #endif if (__pyx_t_1 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_5, __pyx_t_1); ++__pyx_t_1; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_5); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 12877, __pyx_L1_error) #endif if (__pyx_t_1 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_6 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1)); #else __pyx_t_6 = __Pyx_PySequence_ITEM(__pyx_t_5, __pyx_t_1); #endif ++__pyx_t_1; } if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12877, __pyx_L1_error) } else { __pyx_t_6 = __pyx_t_9(__pyx_t_5); if (unlikely(!__pyx_t_6)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { 
if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 12877, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_b, __pyx_t_6); __pyx_t_6 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_3); __pyx_t_6 = __Pyx_PyLong_AddObjC(__pyx_t_3, __pyx_mstate_global->__pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12877, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_6; __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12878 * else: * for i, b in enumerate(pad): * if b > 127 and b < 256: # <<<<<<<<<<<<<< * b = b - 256 * self._pvt_ptr[0].pad[i] = b */ __pyx_t_6 = PyObject_RichCompare(__pyx_v_b, __pyx_mstate_global->__pyx_int_127, Py_GT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12878, __pyx_L1_error) __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_10 < 0))) __PYX_ERR(0, 12878, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (__pyx_t_10) { } else { __pyx_t_2 = __pyx_t_10; goto __pyx_L14_bool_binop_done; } __pyx_t_6 = PyObject_RichCompare(__pyx_v_b, __pyx_mstate_global->__pyx_int_256, Py_LT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12878, __pyx_L1_error) __pyx_t_10 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_10 < 0))) __PYX_ERR(0, 12878, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_2 = __pyx_t_10; __pyx_L14_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":12879 * for i, b in enumerate(pad): * if b > 127 and b < 256: * b = b - 256 # <<<<<<<<<<<<<< * self._pvt_ptr[0].pad[i] = b * @property */ __pyx_t_6 = __Pyx_PyLong_SubtractObjC(__pyx_v_b, __pyx_mstate_global->__pyx_int_256, 0x100, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12879, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_b, __pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":12878 * else: * for i, b in enumerate(pad): * 
if b > 127 and b < 256: # <<<<<<<<<<<<<< * b = b - 256 * self._pvt_ptr[0].pad[i] = b */ } /* "cuda/bindings/runtime.pyx":12880 * if b > 127 and b < 256: * b = b - 256 * self._pvt_ptr[0].pad[i] = b # <<<<<<<<<<<<<< * @property * def accessPolicyWindow(self): */ __pyx_t_11 = __Pyx_PyLong_As_char(__pyx_v_b); if (unlikely((__pyx_t_11 == (char)-1) && PyErr_Occurred())) __PYX_ERR(0, 12880, __pyx_L1_error) __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_v_i); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 12880, __pyx_L1_error) ((__pyx_v_self->_pvt_ptr[0]).pad[__pyx_t_12]) = __pyx_t_11; /* "cuda/bindings/runtime.pyx":12877 * self._pvt_ptr[0].pad[i] = b * else: * for i, b in enumerate(pad): # <<<<<<<<<<<<<< * if b > 127 and b < 256: * b = b - 256 */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":12867 * def pad(self): * return PyBytes_FromStringAndSize(self._pvt_ptr[0].pad, 64) * @pad.setter # <<<<<<<<<<<<<< * def pad(self, pad): * if len(pad) != 64: */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.pad.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_i); __Pyx_XDECREF(__pyx_v_b); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12881 * b = b - 256 * self._pvt_ptr[0].pad[i] = b * @property # <<<<<<<<<<<<<< * def accessPolicyWindow(self): * return self._accessPolicyWindow */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_18accessPolicyWindow_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_18accessPolicyWindow_1__get__(PyObject *__pyx_v_self) { 
/* NOTE(review): Cython-generated code (from cuda/bindings/runtime.pyx); comments added for review only — regenerate from the .pyx rather than hand-editing. */ CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this no-argument getter wrapper; this can only compile if __Pyx_KwValues_VARARGS is a macro that discards its arguments — confirm against the generating Cython version. */ __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_18accessPolicyWindow___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Getter impl for cudaLaunchAttributeValue.accessPolicyWindow: returns the cached Python-level _accessPolicyWindow object with a new reference; does not read the underlying C struct. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_18accessPolicyWindow___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12883 * @property * def accessPolicyWindow(self): * return self._accessPolicyWindow # <<<<<<<<<<<<<< * @accessPolicyWindow.setter * def accessPolicyWindow(self, accessPolicyWindow not None : cudaAccessPolicyWindow): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_accessPolicyWindow); __pyx_r = ((PyObject *)__pyx_v_self->_accessPolicyWindow); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12881 * b = b - 256 * self._pvt_ptr[0].pad[i] = b * @property # <<<<<<<<<<<<<< * def accessPolicyWindow(self): * return self._accessPolicyWindow */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12884 * def accessPolicyWindow(self): * return self._accessPolicyWindow * @accessPolicyWindow.setter # <<<<<<<<<<<<<< * def accessPolicyWindow(self, accessPolicyWindow not None : cudaAccessPolicyWindow): * string.memcpy(&self._pvt_ptr[0].accessPolicyWindow, accessPolicyWindow.getPtr(), sizeof(self._pvt_ptr[0].accessPolicyWindow)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_18accessPolicyWindow_3__set__(PyObject *__pyx_v_self, PyObject 
*__pyx_v_accessPolicyWindow); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_18accessPolicyWindow_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_accessPolicyWindow) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); /* Reject values that are not cudaAccessPolicyWindow instances (setter is declared 'not None : cudaAccessPolicyWindow' in the .pyx). */ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_accessPolicyWindow), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAccessPolicyWindow, 0, "accessPolicyWindow", 0))) __PYX_ERR(0, 12885, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_18accessPolicyWindow_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAccessPolicyWindow *)__pyx_v_accessPolicyWindow)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Setter impl: calls accessPolicyWindow.getPtr(), reinterprets the returned integer as a struct pointer, and memcpy's that struct into _pvt_ptr[0].accessPolicyWindow. */ static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_18accessPolicyWindow_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaAccessPolicyWindow *__pyx_v_accessPolicyWindow) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12886 * @accessPolicyWindow.setter * def accessPolicyWindow(self, accessPolicyWindow not None : cudaAccessPolicyWindow): * string.memcpy(&self._pvt_ptr[0].accessPolicyWindow, accessPolicyWindow.getPtr(), sizeof(self._pvt_ptr[0].accessPolicyWindow)) # <<<<<<<<<<<<<< * @property * def cooperative(self): */ __pyx_t_2 = ((PyObject *)__pyx_v_accessPolicyWindow); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12886, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } /* NOTE(review): getPtr() is expected to return the target address as a Python int (unsigned 64-bit); conversion failure propagates as a Python exception. */ __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12886, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).accessPolicyWindow), ((struct cudaAccessPolicyWindow *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).accessPolicyWindow)))); /* "cuda/bindings/runtime.pyx":12884 * def accessPolicyWindow(self): * return self._accessPolicyWindow * @accessPolicyWindow.setter # <<<<<<<<<<<<<< * def accessPolicyWindow(self, accessPolicyWindow not None : cudaAccessPolicyWindow): * string.memcpy(&self._pvt_ptr[0].accessPolicyWindow, accessPolicyWindow.getPtr(), sizeof(self._pvt_ptr[0].accessPolicyWindow)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.accessPolicyWindow.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12887 * def accessPolicyWindow(self, accessPolicyWindow not None : cudaAccessPolicyWindow): * string.memcpy(&self._pvt_ptr[0].accessPolicyWindow, accessPolicyWindow.getPtr(), sizeof(self._pvt_ptr[0].accessPolicyWindow)) * @property # <<<<<<<<<<<<<< * def cooperative(self): * return 
self._pvt_ptr[0].cooperative */ /* Python wrapper */
/* Getter wrapper for cudaLaunchAttributeValue.cooperative: no argument handling, just
   casts self and forwards to the ___get__ impl. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11cooperative_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11cooperative_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11cooperative___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Getter impl (runtime.pyx:12889): boxes the C int field _pvt_ptr[0].cooperative into a
   new Python int; only failure mode is the allocation inside __Pyx_PyLong_From_int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11cooperative___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12889 * @property * def cooperative(self): * return self._pvt_ptr[0].cooperative # <<<<<<<<<<<<<< * @cooperative.setter * def cooperative(self, int cooperative): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_pvt_ptr[0]).cooperative); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12889, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12887 * def accessPolicyWindow(self, accessPolicyWindow not None : cudaAccessPolicyWindow): * string.memcpy(&self._pvt_ptr[0].accessPolicyWindow, accessPolicyWindow.getPtr(), sizeof(self._pvt_ptr[0].accessPolicyWindow)) * @property # <<<<<<<<<<<<<< * def cooperative(self): * return self._pvt_ptr[0].cooperative */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.cooperative.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12890 * def cooperative(self): * return self._pvt_ptr[0].cooperative * @cooperative.setter # <<<<<<<<<<<<<< * def cooperative(self, int cooperative): * self._pvt_ptr[0].cooperative = cooperative */ /* Python wrapper */
/* Setter wrapper: converts the Python argument to C int (TypeError/OverflowError maps to
   traceback + -1 via __pyx_L3_error), then calls the _2__set__ impl. */
static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11cooperative_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_cooperative); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11cooperative_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_cooperative) { int __pyx_v_cooperative; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_cooperative); { __pyx_v_cooperative = __Pyx_PyLong_As_int(__pyx_arg_cooperative); if (unlikely((__pyx_v_cooperative == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12891, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.cooperative.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11cooperative_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((int)__pyx_v_cooperative)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Setter impl: a plain field store into _pvt_ptr[0].cooperative; cannot fail. */
static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11cooperative_2__set__(struct 
__pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, int __pyx_v_cooperative) { int __pyx_r; /* "cuda/bindings/runtime.pyx":12892 * @cooperative.setter * def cooperative(self, int cooperative): * self._pvt_ptr[0].cooperative = cooperative # <<<<<<<<<<<<<< * @property * def syncPolicy(self): */ (__pyx_v_self->_pvt_ptr[0]).cooperative = __pyx_v_cooperative; /* "cuda/bindings/runtime.pyx":12890 * def cooperative(self): * return self._pvt_ptr[0].cooperative * @cooperative.setter # <<<<<<<<<<<<<< * def cooperative(self, int cooperative): * self._pvt_ptr[0].cooperative = cooperative */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":12893 * def cooperative(self, int cooperative): * self._pvt_ptr[0].cooperative = cooperative * @property # <<<<<<<<<<<<<< * def syncPolicy(self): * if self._pvt_ptr[0].syncPolicy not in _dict_cudaSynchronizationPolicy: */ /* Python wrapper */
/* Getter wrapper for cudaLaunchAttributeValue.syncPolicy: forwards to the impl below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10syncPolicy_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10syncPolicy_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10syncPolicy___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Getter impl (runtime.pyx:12895-12897): boxes the enum field, returns None when the raw
   value is not a key of module global _dict_cudaSynchronizationPolicy, otherwise the
   mapped Python enum object (the field is boxed twice: once for the membership test,
   once for the dict lookup). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10syncPolicy___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = 
NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12895 * @property * def syncPolicy(self): * if self._pvt_ptr[0].syncPolicy not in _dict_cudaSynchronizationPolicy: # <<<<<<<<<<<<<< * return None * return _dict_cudaSynchronizationPolicy[self._pvt_ptr[0].syncPolicy] */ __pyx_t_1 = __Pyx_PyLong_From_enum__cudaSynchronizationPolicy((__pyx_v_self->_pvt_ptr[0]).syncPolicy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12895, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaSynchronizationPolicy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12895, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2);
/* Py_NE here encodes "not in" for the ContainsTF helper; result < 0 means the
   membership test itself raised. */
__pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 12895, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "cuda/bindings/runtime.pyx":12896 * def syncPolicy(self): * if self._pvt_ptr[0].syncPolicy not in _dict_cudaSynchronizationPolicy: * return None # <<<<<<<<<<<<<< * return _dict_cudaSynchronizationPolicy[self._pvt_ptr[0].syncPolicy] * @syncPolicy.setter */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12895 * @property * def syncPolicy(self): * if self._pvt_ptr[0].syncPolicy not in _dict_cudaSynchronizationPolicy: # <<<<<<<<<<<<<< * return None * return _dict_cudaSynchronizationPolicy[self._pvt_ptr[0].syncPolicy] */ } /* "cuda/bindings/runtime.pyx":12897 * if self._pvt_ptr[0].syncPolicy not in _dict_cudaSynchronizationPolicy: * return None * return _dict_cudaSynchronizationPolicy[self._pvt_ptr[0].syncPolicy] # <<<<<<<<<<<<<< * @syncPolicy.setter * def syncPolicy(self, syncPolicy not None : cudaSynchronizationPolicy): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, 
__pyx_mstate_global->__pyx_n_u_dict_cudaSynchronizationPolicy); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12897, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyLong_From_enum__cudaSynchronizationPolicy((__pyx_v_self->_pvt_ptr[0]).syncPolicy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12897, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12897, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12893 * def cooperative(self, int cooperative): * self._pvt_ptr[0].cooperative = cooperative * @property # <<<<<<<<<<<<<< * def syncPolicy(self): * if self._pvt_ptr[0].syncPolicy not in _dict_cudaSynchronizationPolicy: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.syncPolicy.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12898 * return None * return _dict_cudaSynchronizationPolicy[self._pvt_ptr[0].syncPolicy] * @syncPolicy.setter # <<<<<<<<<<<<<< * def syncPolicy(self, syncPolicy not None : cudaSynchronizationPolicy): * self._pvt_ptr[0].syncPolicy = syncPolicy.value */ /* Python wrapper */
/* Setter wrapper for syncPolicy: explicit None rejection (the `not None` annotation)
   with a TypeError, then dispatch to the _2__set__ impl. */
static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10syncPolicy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_syncPolicy); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10syncPolicy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_syncPolicy) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(((PyObject *)__pyx_v_syncPolicy) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "syncPolicy"); __PYX_ERR(0, 12899, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10syncPolicy_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((PyObject *)__pyx_v_syncPolicy)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Setter impl (runtime.pyx:12900): reads syncPolicy.value and stores it into the C enum
   field. NOTE(review): the unconditional PyErr_Occurred() after the enum conversion is
   how the generated code detects conversion failure (no sentinel compare for enums). */
static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10syncPolicy_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, PyObject *__pyx_v_syncPolicy) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaSynchronizationPolicy __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12900 * @syncPolicy.setter * def syncPolicy(self, syncPolicy not None : cudaSynchronizationPolicy): * self._pvt_ptr[0].syncPolicy = syncPolicy.value # <<<<<<<<<<<<<< * @property * def clusterDim(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_syncPolicy, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12900, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaSynchronizationPolicy)__Pyx_PyLong_As_enum__cudaSynchronizationPolicy(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 12900, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (__pyx_v_self->_pvt_ptr[0]).syncPolicy = __pyx_t_2; /* "cuda/bindings/runtime.pyx":12898 * return None * return _dict_cudaSynchronizationPolicy[self._pvt_ptr[0].syncPolicy] * @syncPolicy.setter # 
<<<<<<<<<<<<<< * def syncPolicy(self, syncPolicy not None : cudaSynchronizationPolicy): * self._pvt_ptr[0].syncPolicy = syncPolicy.value */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.syncPolicy.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12901 * def syncPolicy(self, syncPolicy not None : cudaSynchronizationPolicy): * self._pvt_ptr[0].syncPolicy = syncPolicy.value * @property # <<<<<<<<<<<<<< * def clusterDim(self): * return self._clusterDim */ /* Python wrapper */
/* Getter wrapper for cudaLaunchAttributeValue.clusterDim: forwards to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10clusterDim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10clusterDim_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10clusterDim___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Getter impl (runtime.pyx:12903): returns the cached wrapper object self->_clusterDim
   with a fresh incref; infallible (no error label is generated). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10clusterDim___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12903 * @property * def clusterDim(self): * return self._clusterDim # <<<<<<<<<<<<<< * @clusterDim.setter * def clusterDim(self, clusterDim not None : anon_struct22): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_clusterDim); __pyx_r 
= ((PyObject *)__pyx_v_self->_clusterDim); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12901 * def syncPolicy(self, syncPolicy not None : cudaSynchronizationPolicy): * self._pvt_ptr[0].syncPolicy = syncPolicy.value * @property # <<<<<<<<<<<<<< * def clusterDim(self): * return self._clusterDim */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12904 * def clusterDim(self): * return self._clusterDim * @clusterDim.setter # <<<<<<<<<<<<<< * def clusterDim(self, clusterDim not None : anon_struct22): * string.memcpy(&self._pvt_ptr[0].clusterDim, clusterDim.getPtr(), sizeof(self._pvt_ptr[0].clusterDim)) */ /* Python wrapper */
/* Setter wrapper for clusterDim: type-checks the value against anon_struct22
   (none_allowed=0, i.e. `not None`), then dispatches to the _2__set__ impl. */
static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10clusterDim_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_clusterDim); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10clusterDim_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_clusterDim) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_clusterDim), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct22, 0, "clusterDim", 0))) __PYX_ERR(0, 12905, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10clusterDim_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct22 *)__pyx_v_clusterDim)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Setter impl (runtime.pyx:12906): same pattern as accessPolicyWindow.__set__ — calls
   clusterDim.getPtr(), converts the Python int to an address, and memcpy's
   sizeof(field) bytes into the union slot. NOTE(review): getPtr() is trusted to return
   a valid anon_struct22-sized buffer address. */
static int 
__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10clusterDim_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct22 *__pyx_v_clusterDim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12906 * @clusterDim.setter * def clusterDim(self, clusterDim not None : anon_struct22): * string.memcpy(&self._pvt_ptr[0].clusterDim, clusterDim.getPtr(), sizeof(self._pvt_ptr[0].clusterDim)) # <<<<<<<<<<<<<< * @property * def clusterSchedulingPolicyPreference(self): */ __pyx_t_2 = ((PyObject *)__pyx_v_clusterDim); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12906, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).clusterDim), ((struct anon_struct22 *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).clusterDim)))); /* "cuda/bindings/runtime.pyx":12904 * def clusterDim(self): * return self._clusterDim * @clusterDim.setter # <<<<<<<<<<<<<< * def clusterDim(self, clusterDim not None : anon_struct22): * string.memcpy(&self._pvt_ptr[0].clusterDim, clusterDim.getPtr(), sizeof(self._pvt_ptr[0].clusterDim)) */ /* function exit code */ __pyx_r 
= 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.clusterDim.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12907 * def clusterDim(self, clusterDim not None : anon_struct22): * string.memcpy(&self._pvt_ptr[0].clusterDim, clusterDim.getPtr(), sizeof(self._pvt_ptr[0].clusterDim)) * @property # <<<<<<<<<<<<<< * def clusterSchedulingPolicyPreference(self): * if self._pvt_ptr[0].clusterSchedulingPolicyPreference not in _dict_cudaClusterSchedulingPolicy: */ /* Python wrapper */
/* Getter wrapper for clusterSchedulingPolicyPreference: forwards to the ___get__ impl. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_33clusterSchedulingPolicyPreference_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_33clusterSchedulingPolicyPreference_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_33clusterSchedulingPolicyPreference___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Getter impl (runtime.pyx:12909-12911): boxes the enum field, returns None when it is
   not a key of _dict_cudaClusterSchedulingPolicy, else the mapped enum object. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_33clusterSchedulingPolicyPreference___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* 
"cuda/bindings/runtime.pyx":12909 * @property * def clusterSchedulingPolicyPreference(self): * if self._pvt_ptr[0].clusterSchedulingPolicyPreference not in _dict_cudaClusterSchedulingPolicy: # <<<<<<<<<<<<<< * return None * return _dict_cudaClusterSchedulingPolicy[self._pvt_ptr[0].clusterSchedulingPolicyPreference] */ __pyx_t_1 = __Pyx_PyLong_From_enum__cudaClusterSchedulingPolicy((__pyx_v_self->_pvt_ptr[0]).clusterSchedulingPolicyPreference); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaClusterSchedulingPolic); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2);
/* Py_NE encodes "not in"; negative result means the membership test raised. */
__pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 12909, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "cuda/bindings/runtime.pyx":12910 * def clusterSchedulingPolicyPreference(self): * if self._pvt_ptr[0].clusterSchedulingPolicyPreference not in _dict_cudaClusterSchedulingPolicy: * return None # <<<<<<<<<<<<<< * return _dict_cudaClusterSchedulingPolicy[self._pvt_ptr[0].clusterSchedulingPolicyPreference] * @clusterSchedulingPolicyPreference.setter */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12909 * @property * def clusterSchedulingPolicyPreference(self): * if self._pvt_ptr[0].clusterSchedulingPolicyPreference not in _dict_cudaClusterSchedulingPolicy: # <<<<<<<<<<<<<< * return None * return _dict_cudaClusterSchedulingPolicy[self._pvt_ptr[0].clusterSchedulingPolicyPreference] */ } /* "cuda/bindings/runtime.pyx":12911 * if self._pvt_ptr[0].clusterSchedulingPolicyPreference not in _dict_cudaClusterSchedulingPolicy: * return None * return _dict_cudaClusterSchedulingPolicy[self._pvt_ptr[0].clusterSchedulingPolicyPreference] # <<<<<<<<<<<<<< * 
@clusterSchedulingPolicyPreference.setter * def clusterSchedulingPolicyPreference(self, clusterSchedulingPolicyPreference not None : cudaClusterSchedulingPolicy): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaClusterSchedulingPolic); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12911, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyLong_From_enum__cudaClusterSchedulingPolicy((__pyx_v_self->_pvt_ptr[0]).clusterSchedulingPolicyPreference); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12911, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12911, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12907 * def clusterDim(self, clusterDim not None : anon_struct22): * string.memcpy(&self._pvt_ptr[0].clusterDim, clusterDim.getPtr(), sizeof(self._pvt_ptr[0].clusterDim)) * @property # <<<<<<<<<<<<<< * def clusterSchedulingPolicyPreference(self): * if self._pvt_ptr[0].clusterSchedulingPolicyPreference not in _dict_cudaClusterSchedulingPolicy: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.clusterSchedulingPolicyPreference.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12912 * return None * return _dict_cudaClusterSchedulingPolicy[self._pvt_ptr[0].clusterSchedulingPolicyPreference] * @clusterSchedulingPolicyPreference.setter # <<<<<<<<<<<<<< * def clusterSchedulingPolicyPreference(self, clusterSchedulingPolicyPreference not None : cudaClusterSchedulingPolicy): * 
self._pvt_ptr[0].clusterSchedulingPolicyPreference = clusterSchedulingPolicyPreference.value */ /* Python wrapper */
/* Setter wrapper for clusterSchedulingPolicyPreference: explicit None rejection with a
   TypeError, then dispatch to the _2__set__ impl. */
static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_33clusterSchedulingPolicyPreference_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_clusterSchedulingPolicyPreference); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_33clusterSchedulingPolicyPreference_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_clusterSchedulingPolicyPreference) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(((PyObject *)__pyx_v_clusterSchedulingPolicyPreference) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "clusterSchedulingPolicyPreference"); __PYX_ERR(0, 12913, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_33clusterSchedulingPolicyPreference_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((PyObject *)__pyx_v_clusterSchedulingPolicyPreference)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Setter impl (runtime.pyx:12914): reads .value off the argument and stores it as the
   C enum; conversion failure is detected via PyErr_Occurred(). */
static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_33clusterSchedulingPolicyPreference_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, PyObject *__pyx_v_clusterSchedulingPolicyPreference) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaClusterSchedulingPolicy __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12914 * 
@clusterSchedulingPolicyPreference.setter * def clusterSchedulingPolicyPreference(self, clusterSchedulingPolicyPreference not None : cudaClusterSchedulingPolicy): * self._pvt_ptr[0].clusterSchedulingPolicyPreference = clusterSchedulingPolicyPreference.value # <<<<<<<<<<<<<< * @property * def programmaticStreamSerializationAllowed(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_clusterSchedulingPolicyPreference, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaClusterSchedulingPolicy)__Pyx_PyLong_As_enum__cudaClusterSchedulingPolicy(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 12914, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (__pyx_v_self->_pvt_ptr[0]).clusterSchedulingPolicyPreference = __pyx_t_2; /* "cuda/bindings/runtime.pyx":12912 * return None * return _dict_cudaClusterSchedulingPolicy[self._pvt_ptr[0].clusterSchedulingPolicyPreference] * @clusterSchedulingPolicyPreference.setter # <<<<<<<<<<<<<< * def clusterSchedulingPolicyPreference(self, clusterSchedulingPolicyPreference not None : cudaClusterSchedulingPolicy): * self._pvt_ptr[0].clusterSchedulingPolicyPreference = clusterSchedulingPolicyPreference.value */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.clusterSchedulingPolicyPreference.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12915 * def clusterSchedulingPolicyPreference(self, clusterSchedulingPolicyPreference not None : cudaClusterSchedulingPolicy): * self._pvt_ptr[0].clusterSchedulingPolicyPreference = clusterSchedulingPolicyPreference.value * @property # <<<<<<<<<<<<<< * def programmaticStreamSerializationAllowed(self): * return self._pvt_ptr[0].programmaticStreamSerializationAllowed */ /* 
Python wrapper */
/* Getter wrapper for programmaticStreamSerializationAllowed: forwards to the impl. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_38programmaticStreamSerializationAllowed_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_38programmaticStreamSerializationAllowed_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_38programmaticStreamSerializationAllowed___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Getter impl (runtime.pyx:12917): boxes the C int field into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_38programmaticStreamSerializationAllowed___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12917 * @property * def programmaticStreamSerializationAllowed(self): * return self._pvt_ptr[0].programmaticStreamSerializationAllowed # <<<<<<<<<<<<<< * @programmaticStreamSerializationAllowed.setter * def programmaticStreamSerializationAllowed(self, int programmaticStreamSerializationAllowed): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_pvt_ptr[0]).programmaticStreamSerializationAllowed); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12915 * def clusterSchedulingPolicyPreference(self, clusterSchedulingPolicyPreference not None : cudaClusterSchedulingPolicy): 
* self._pvt_ptr[0].clusterSchedulingPolicyPreference = clusterSchedulingPolicyPreference.value * @property # <<<<<<<<<<<<<< * def programmaticStreamSerializationAllowed(self): * return self._pvt_ptr[0].programmaticStreamSerializationAllowed */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.programmaticStreamSerializationAllowed.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12918 * def programmaticStreamSerializationAllowed(self): * return self._pvt_ptr[0].programmaticStreamSerializationAllowed * @programmaticStreamSerializationAllowed.setter # <<<<<<<<<<<<<< * def programmaticStreamSerializationAllowed(self, int programmaticStreamSerializationAllowed): * self._pvt_ptr[0].programmaticStreamSerializationAllowed = programmaticStreamSerializationAllowed */ /* Python wrapper */
/* Setter wrapper: converts the Python argument to C int (failure -> traceback + -1),
   then calls the _2__set__ impl. */
static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_38programmaticStreamSerializationAllowed_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_programmaticStreamSerializationAllowed); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_38programmaticStreamSerializationAllowed_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_programmaticStreamSerializationAllowed) { int __pyx_v_programmaticStreamSerializationAllowed; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_programmaticStreamSerializationAllowed); { __pyx_v_programmaticStreamSerializationAllowed = __Pyx_PyLong_As_int(__pyx_arg_programmaticStreamSerializationAllowed); if 
(unlikely((__pyx_v_programmaticStreamSerializationAllowed == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12919, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.programmaticStreamSerializationAllowed.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_38programmaticStreamSerializationAllowed_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((int)__pyx_v_programmaticStreamSerializationAllowed)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Setter impl (runtime.pyx:12920): plain field store; cannot fail. */
static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_38programmaticStreamSerializationAllowed_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, int __pyx_v_programmaticStreamSerializationAllowed) { int __pyx_r; /* "cuda/bindings/runtime.pyx":12920 * @programmaticStreamSerializationAllowed.setter * def programmaticStreamSerializationAllowed(self, int programmaticStreamSerializationAllowed): * self._pvt_ptr[0].programmaticStreamSerializationAllowed = programmaticStreamSerializationAllowed # <<<<<<<<<<<<<< * @property * def programmaticEvent(self): */ (__pyx_v_self->_pvt_ptr[0]).programmaticStreamSerializationAllowed = __pyx_v_programmaticStreamSerializationAllowed; /* "cuda/bindings/runtime.pyx":12918 * def programmaticStreamSerializationAllowed(self): * return self._pvt_ptr[0].programmaticStreamSerializationAllowed * @programmaticStreamSerializationAllowed.setter # <<<<<<<<<<<<<< * def programmaticStreamSerializationAllowed(self, int programmaticStreamSerializationAllowed): * self._pvt_ptr[0].programmaticStreamSerializationAllowed = programmaticStreamSerializationAllowed */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* 
"cuda/bindings/runtime.pyx":12921 * def programmaticStreamSerializationAllowed(self, int programmaticStreamSerializationAllowed): * self._pvt_ptr[0].programmaticStreamSerializationAllowed = programmaticStreamSerializationAllowed * @property # <<<<<<<<<<<<<< * def programmaticEvent(self): * return self._programmaticEvent */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17programmaticEvent_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17programmaticEvent_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17programmaticEvent___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17programmaticEvent___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12923 * @property * def programmaticEvent(self): * return self._programmaticEvent # <<<<<<<<<<<<<< * @programmaticEvent.setter * def programmaticEvent(self, programmaticEvent not None : anon_struct23): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_programmaticEvent); __pyx_r = ((PyObject *)__pyx_v_self->_programmaticEvent); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12921 * def programmaticStreamSerializationAllowed(self, int programmaticStreamSerializationAllowed): * self._pvt_ptr[0].programmaticStreamSerializationAllowed = 
programmaticStreamSerializationAllowed * @property # <<<<<<<<<<<<<< * def programmaticEvent(self): * return self._programmaticEvent */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12924 * def programmaticEvent(self): * return self._programmaticEvent * @programmaticEvent.setter # <<<<<<<<<<<<<< * def programmaticEvent(self, programmaticEvent not None : anon_struct23): * string.memcpy(&self._pvt_ptr[0].programmaticEvent, programmaticEvent.getPtr(), sizeof(self._pvt_ptr[0].programmaticEvent)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17programmaticEvent_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_programmaticEvent); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17programmaticEvent_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_programmaticEvent) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_programmaticEvent), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct23, 0, "programmaticEvent", 0))) __PYX_ERR(0, 12925, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17programmaticEvent_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct23 *)__pyx_v_programmaticEvent)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int 
__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17programmaticEvent_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct23 *__pyx_v_programmaticEvent) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12926 * @programmaticEvent.setter * def programmaticEvent(self, programmaticEvent not None : anon_struct23): * string.memcpy(&self._pvt_ptr[0].programmaticEvent, programmaticEvent.getPtr(), sizeof(self._pvt_ptr[0].programmaticEvent)) # <<<<<<<<<<<<<< * @property * def priority(self): */ __pyx_t_2 = ((PyObject *)__pyx_v_programmaticEvent); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12926, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).programmaticEvent), ((struct anon_struct23 *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).programmaticEvent)))); /* "cuda/bindings/runtime.pyx":12924 * def programmaticEvent(self): * return self._programmaticEvent * @programmaticEvent.setter # <<<<<<<<<<<<<< * def programmaticEvent(self, programmaticEvent not None : anon_struct23): * string.memcpy(&self._pvt_ptr[0].programmaticEvent, 
programmaticEvent.getPtr(), sizeof(self._pvt_ptr[0].programmaticEvent)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.programmaticEvent.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12927 * def programmaticEvent(self, programmaticEvent not None : anon_struct23): * string.memcpy(&self._pvt_ptr[0].programmaticEvent, programmaticEvent.getPtr(), sizeof(self._pvt_ptr[0].programmaticEvent)) * @property # <<<<<<<<<<<<<< * def priority(self): * return self._pvt_ptr[0].priority */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8priority_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8priority_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8priority___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8priority___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12929 * @property * def priority(self): * return self._pvt_ptr[0].priority # <<<<<<<<<<<<<< * 
@priority.setter * def priority(self, int priority): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_pvt_ptr[0]).priority); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12927 * def programmaticEvent(self, programmaticEvent not None : anon_struct23): * string.memcpy(&self._pvt_ptr[0].programmaticEvent, programmaticEvent.getPtr(), sizeof(self._pvt_ptr[0].programmaticEvent)) * @property # <<<<<<<<<<<<<< * def priority(self): * return self._pvt_ptr[0].priority */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.priority.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12930 * def priority(self): * return self._pvt_ptr[0].priority * @priority.setter # <<<<<<<<<<<<<< * def priority(self, int priority): * self._pvt_ptr[0].priority = priority */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8priority_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_priority); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8priority_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_priority) { int __pyx_v_priority; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_priority); { __pyx_v_priority = __Pyx_PyLong_As_int(__pyx_arg_priority); if (unlikely((__pyx_v_priority == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12931, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; 
__pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.priority.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8priority_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((int)__pyx_v_priority)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_8priority_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, int __pyx_v_priority) { int __pyx_r; /* "cuda/bindings/runtime.pyx":12932 * @priority.setter * def priority(self, int priority): * self._pvt_ptr[0].priority = priority # <<<<<<<<<<<<<< * @property * def memSyncDomainMap(self): */ (__pyx_v_self->_pvt_ptr[0]).priority = __pyx_v_priority; /* "cuda/bindings/runtime.pyx":12930 * def priority(self): * return self._pvt_ptr[0].priority * @priority.setter # <<<<<<<<<<<<<< * def priority(self, int priority): * self._pvt_ptr[0].priority = priority */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":12933 * def priority(self, int priority): * self._pvt_ptr[0].priority = priority * @property # <<<<<<<<<<<<<< * def memSyncDomainMap(self): * return self._memSyncDomainMap */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_16memSyncDomainMap_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_16memSyncDomainMap_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_16memSyncDomainMap___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_16memSyncDomainMap___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12935 * @property * def memSyncDomainMap(self): * return self._memSyncDomainMap # <<<<<<<<<<<<<< * @memSyncDomainMap.setter * def memSyncDomainMap(self, memSyncDomainMap not None : cudaLaunchMemSyncDomainMap): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_memSyncDomainMap); __pyx_r = ((PyObject *)__pyx_v_self->_memSyncDomainMap); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12933 * def priority(self, int priority): * self._pvt_ptr[0].priority = priority * @property # <<<<<<<<<<<<<< * def memSyncDomainMap(self): * return self._memSyncDomainMap */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12936 * def memSyncDomainMap(self): * return self._memSyncDomainMap * @memSyncDomainMap.setter # <<<<<<<<<<<<<< * def memSyncDomainMap(self, memSyncDomainMap not None : cudaLaunchMemSyncDomainMap): * string.memcpy(&self._pvt_ptr[0].memSyncDomainMap, memSyncDomainMap.getPtr(), sizeof(self._pvt_ptr[0].memSyncDomainMap)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_16memSyncDomainMap_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_memSyncDomainMap); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_16memSyncDomainMap_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_memSyncDomainMap) { CYTHON_UNUSED PyObject *const 
*__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_memSyncDomainMap), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaLaunchMemSyncDomainMap, 0, "memSyncDomainMap", 0))) __PYX_ERR(0, 12937, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_16memSyncDomainMap_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchMemSyncDomainMap *)__pyx_v_memSyncDomainMap)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_16memSyncDomainMap_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchMemSyncDomainMap *__pyx_v_memSyncDomainMap) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12938 * @memSyncDomainMap.setter * def memSyncDomainMap(self, memSyncDomainMap not None : cudaLaunchMemSyncDomainMap): * string.memcpy(&self._pvt_ptr[0].memSyncDomainMap, memSyncDomainMap.getPtr(), sizeof(self._pvt_ptr[0].memSyncDomainMap)) # <<<<<<<<<<<<<< * @property * def memSyncDomain(self): */ __pyx_t_2 = ((PyObject *)__pyx_v_memSyncDomainMap); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = 
__Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12938, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12938, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).memSyncDomainMap), ((cudaLaunchMemSyncDomainMap *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).memSyncDomainMap)))); /* "cuda/bindings/runtime.pyx":12936 * def memSyncDomainMap(self): * return self._memSyncDomainMap * @memSyncDomainMap.setter # <<<<<<<<<<<<<< * def memSyncDomainMap(self, memSyncDomainMap not None : cudaLaunchMemSyncDomainMap): * string.memcpy(&self._pvt_ptr[0].memSyncDomainMap, memSyncDomainMap.getPtr(), sizeof(self._pvt_ptr[0].memSyncDomainMap)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.memSyncDomainMap.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12939 * def memSyncDomainMap(self, memSyncDomainMap not None : cudaLaunchMemSyncDomainMap): * string.memcpy(&self._pvt_ptr[0].memSyncDomainMap, memSyncDomainMap.getPtr(), sizeof(self._pvt_ptr[0].memSyncDomainMap)) * @property # <<<<<<<<<<<<<< * def memSyncDomain(self): * if self._pvt_ptr[0].memSyncDomain not in _dict_cudaLaunchMemSyncDomain: */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13memSyncDomain_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13memSyncDomain_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13memSyncDomain___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13memSyncDomain___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12941 * @property * def memSyncDomain(self): * if self._pvt_ptr[0].memSyncDomain not in _dict_cudaLaunchMemSyncDomain: # <<<<<<<<<<<<<< * return None * return _dict_cudaLaunchMemSyncDomain[self._pvt_ptr[0].memSyncDomain] */ __pyx_t_1 = __Pyx_PyLong_From_enum__cudaLaunchMemSyncDomain((__pyx_v_self->_pvt_ptr[0]).memSyncDomain); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12941, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaLaunchMemSyncDomain); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12941, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 12941, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "cuda/bindings/runtime.pyx":12942 * def memSyncDomain(self): * if 
self._pvt_ptr[0].memSyncDomain not in _dict_cudaLaunchMemSyncDomain: * return None # <<<<<<<<<<<<<< * return _dict_cudaLaunchMemSyncDomain[self._pvt_ptr[0].memSyncDomain] * @memSyncDomain.setter */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12941 * @property * def memSyncDomain(self): * if self._pvt_ptr[0].memSyncDomain not in _dict_cudaLaunchMemSyncDomain: # <<<<<<<<<<<<<< * return None * return _dict_cudaLaunchMemSyncDomain[self._pvt_ptr[0].memSyncDomain] */ } /* "cuda/bindings/runtime.pyx":12943 * if self._pvt_ptr[0].memSyncDomain not in _dict_cudaLaunchMemSyncDomain: * return None * return _dict_cudaLaunchMemSyncDomain[self._pvt_ptr[0].memSyncDomain] # <<<<<<<<<<<<<< * @memSyncDomain.setter * def memSyncDomain(self, memSyncDomain not None : cudaLaunchMemSyncDomain): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaLaunchMemSyncDomain); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12943, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyLong_From_enum__cudaLaunchMemSyncDomain((__pyx_v_self->_pvt_ptr[0]).memSyncDomain); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12943, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12943, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12939 * def memSyncDomainMap(self, memSyncDomainMap not None : cudaLaunchMemSyncDomainMap): * string.memcpy(&self._pvt_ptr[0].memSyncDomainMap, memSyncDomainMap.getPtr(), sizeof(self._pvt_ptr[0].memSyncDomainMap)) * @property # <<<<<<<<<<<<<< * def memSyncDomain(self): * if self._pvt_ptr[0].memSyncDomain not in _dict_cudaLaunchMemSyncDomain: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.memSyncDomain.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12944 * return None * return _dict_cudaLaunchMemSyncDomain[self._pvt_ptr[0].memSyncDomain] * @memSyncDomain.setter # <<<<<<<<<<<<<< * def memSyncDomain(self, memSyncDomain not None : cudaLaunchMemSyncDomain): * self._pvt_ptr[0].memSyncDomain = memSyncDomain.value */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13memSyncDomain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_memSyncDomain); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13memSyncDomain_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_memSyncDomain) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(((PyObject *)__pyx_v_memSyncDomain) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "memSyncDomain"); __PYX_ERR(0, 12945, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13memSyncDomain_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((PyObject *)__pyx_v_memSyncDomain)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13memSyncDomain_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, PyObject 
*__pyx_v_memSyncDomain) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaLaunchMemSyncDomain __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12946 * @memSyncDomain.setter * def memSyncDomain(self, memSyncDomain not None : cudaLaunchMemSyncDomain): * self._pvt_ptr[0].memSyncDomain = memSyncDomain.value # <<<<<<<<<<<<<< * @property * def preferredClusterDim(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_memSyncDomain, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12946, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaLaunchMemSyncDomain)__Pyx_PyLong_As_enum__cudaLaunchMemSyncDomain(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 12946, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (__pyx_v_self->_pvt_ptr[0]).memSyncDomain = __pyx_t_2; /* "cuda/bindings/runtime.pyx":12944 * return None * return _dict_cudaLaunchMemSyncDomain[self._pvt_ptr[0].memSyncDomain] * @memSyncDomain.setter # <<<<<<<<<<<<<< * def memSyncDomain(self, memSyncDomain not None : cudaLaunchMemSyncDomain): * self._pvt_ptr[0].memSyncDomain = memSyncDomain.value */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.memSyncDomain.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12947 * def memSyncDomain(self, memSyncDomain not None : cudaLaunchMemSyncDomain): * self._pvt_ptr[0].memSyncDomain = memSyncDomain.value * @property # <<<<<<<<<<<<<< * def preferredClusterDim(self): * return self._preferredClusterDim */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_19preferredClusterDim_1__get__(PyObject *__pyx_v_self); 
/*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_19preferredClusterDim_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_19preferredClusterDim___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_19preferredClusterDim___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12949 * @property * def preferredClusterDim(self): * return self._preferredClusterDim # <<<<<<<<<<<<<< * @preferredClusterDim.setter * def preferredClusterDim(self, preferredClusterDim not None : anon_struct24): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_preferredClusterDim); __pyx_r = ((PyObject *)__pyx_v_self->_preferredClusterDim); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12947 * def memSyncDomain(self, memSyncDomain not None : cudaLaunchMemSyncDomain): * self._pvt_ptr[0].memSyncDomain = memSyncDomain.value * @property # <<<<<<<<<<<<<< * def preferredClusterDim(self): * return self._preferredClusterDim */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12950 * def preferredClusterDim(self): * return self._preferredClusterDim * @preferredClusterDim.setter # <<<<<<<<<<<<<< * def preferredClusterDim(self, preferredClusterDim not None : anon_struct24): * string.memcpy(&self._pvt_ptr[0].preferredClusterDim, 
preferredClusterDim.getPtr(), sizeof(self._pvt_ptr[0].preferredClusterDim)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_19preferredClusterDim_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_preferredClusterDim); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_19preferredClusterDim_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_preferredClusterDim) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_preferredClusterDim), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct24, 0, "preferredClusterDim", 0))) __PYX_ERR(0, 12951, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_19preferredClusterDim_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct24 *)__pyx_v_preferredClusterDim)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_19preferredClusterDim_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct24 *__pyx_v_preferredClusterDim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* 
"cuda/bindings/runtime.pyx":12952 * @preferredClusterDim.setter * def preferredClusterDim(self, preferredClusterDim not None : anon_struct24): * string.memcpy(&self._pvt_ptr[0].preferredClusterDim, preferredClusterDim.getPtr(), sizeof(self._pvt_ptr[0].preferredClusterDim)) # <<<<<<<<<<<<<< * @property * def launchCompletionEvent(self): */ __pyx_t_2 = ((PyObject *)__pyx_v_preferredClusterDim); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12952, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12952, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).preferredClusterDim), ((struct anon_struct24 *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).preferredClusterDim)))); /* "cuda/bindings/runtime.pyx":12950 * def preferredClusterDim(self): * return self._preferredClusterDim * @preferredClusterDim.setter # <<<<<<<<<<<<<< * def preferredClusterDim(self, preferredClusterDim not None : anon_struct24): * string.memcpy(&self._pvt_ptr[0].preferredClusterDim, preferredClusterDim.getPtr(), sizeof(self._pvt_ptr[0].preferredClusterDim)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.preferredClusterDim.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12953 * def preferredClusterDim(self, preferredClusterDim 
not None : anon_struct24): * string.memcpy(&self._pvt_ptr[0].preferredClusterDim, preferredClusterDim.getPtr(), sizeof(self._pvt_ptr[0].preferredClusterDim)) * @property # <<<<<<<<<<<<<< * def launchCompletionEvent(self): * return self._launchCompletionEvent */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_21launchCompletionEvent_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_21launchCompletionEvent_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_21launchCompletionEvent___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_21launchCompletionEvent___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12955 * @property * def launchCompletionEvent(self): * return self._launchCompletionEvent # <<<<<<<<<<<<<< * @launchCompletionEvent.setter * def launchCompletionEvent(self, launchCompletionEvent not None : anon_struct25): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_launchCompletionEvent); __pyx_r = ((PyObject *)__pyx_v_self->_launchCompletionEvent); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12953 * def preferredClusterDim(self, preferredClusterDim not None : anon_struct24): * string.memcpy(&self._pvt_ptr[0].preferredClusterDim, preferredClusterDim.getPtr(), 
sizeof(self._pvt_ptr[0].preferredClusterDim)) * @property # <<<<<<<<<<<<<< * def launchCompletionEvent(self): * return self._launchCompletionEvent */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12956 * def launchCompletionEvent(self): * return self._launchCompletionEvent * @launchCompletionEvent.setter # <<<<<<<<<<<<<< * def launchCompletionEvent(self, launchCompletionEvent not None : anon_struct25): * string.memcpy(&self._pvt_ptr[0].launchCompletionEvent, launchCompletionEvent.getPtr(), sizeof(self._pvt_ptr[0].launchCompletionEvent)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_21launchCompletionEvent_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_launchCompletionEvent); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_21launchCompletionEvent_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_launchCompletionEvent) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_launchCompletionEvent), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct25, 0, "launchCompletionEvent", 0))) __PYX_ERR(0, 12957, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_21launchCompletionEvent_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *)__pyx_v_launchCompletionEvent)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int 
__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_21launchCompletionEvent_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct25 *__pyx_v_launchCompletionEvent) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12958 * @launchCompletionEvent.setter * def launchCompletionEvent(self, launchCompletionEvent not None : anon_struct25): * string.memcpy(&self._pvt_ptr[0].launchCompletionEvent, launchCompletionEvent.getPtr(), sizeof(self._pvt_ptr[0].launchCompletionEvent)) # <<<<<<<<<<<<<< * @property * def deviceUpdatableKernelNode(self): */ __pyx_t_2 = ((PyObject *)__pyx_v_launchCompletionEvent); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12958, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12958, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).launchCompletionEvent), ((struct anon_struct25 *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).launchCompletionEvent)))); /* "cuda/bindings/runtime.pyx":12956 * def launchCompletionEvent(self): * return self._launchCompletionEvent * @launchCompletionEvent.setter # <<<<<<<<<<<<<< * def launchCompletionEvent(self, launchCompletionEvent 
not None : anon_struct25): * string.memcpy(&self._pvt_ptr[0].launchCompletionEvent, launchCompletionEvent.getPtr(), sizeof(self._pvt_ptr[0].launchCompletionEvent)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.launchCompletionEvent.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12959 * def launchCompletionEvent(self, launchCompletionEvent not None : anon_struct25): * string.memcpy(&self._pvt_ptr[0].launchCompletionEvent, launchCompletionEvent.getPtr(), sizeof(self._pvt_ptr[0].launchCompletionEvent)) * @property # <<<<<<<<<<<<<< * def deviceUpdatableKernelNode(self): * return self._deviceUpdatableKernelNode */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_25deviceUpdatableKernelNode_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_25deviceUpdatableKernelNode_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_25deviceUpdatableKernelNode___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_25deviceUpdatableKernelNode___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* 
"cuda/bindings/runtime.pyx":12961 * @property * def deviceUpdatableKernelNode(self): * return self._deviceUpdatableKernelNode # <<<<<<<<<<<<<< * @deviceUpdatableKernelNode.setter * def deviceUpdatableKernelNode(self, deviceUpdatableKernelNode not None : anon_struct26): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_deviceUpdatableKernelNode); __pyx_r = ((PyObject *)__pyx_v_self->_deviceUpdatableKernelNode); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12959 * def launchCompletionEvent(self, launchCompletionEvent not None : anon_struct25): * string.memcpy(&self._pvt_ptr[0].launchCompletionEvent, launchCompletionEvent.getPtr(), sizeof(self._pvt_ptr[0].launchCompletionEvent)) * @property # <<<<<<<<<<<<<< * def deviceUpdatableKernelNode(self): * return self._deviceUpdatableKernelNode */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12962 * def deviceUpdatableKernelNode(self): * return self._deviceUpdatableKernelNode * @deviceUpdatableKernelNode.setter # <<<<<<<<<<<<<< * def deviceUpdatableKernelNode(self, deviceUpdatableKernelNode not None : anon_struct26): * string.memcpy(&self._pvt_ptr[0].deviceUpdatableKernelNode, deviceUpdatableKernelNode.getPtr(), sizeof(self._pvt_ptr[0].deviceUpdatableKernelNode)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_25deviceUpdatableKernelNode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_deviceUpdatableKernelNode); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_25deviceUpdatableKernelNode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_deviceUpdatableKernelNode) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, 
__pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_deviceUpdatableKernelNode), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct26, 0, "deviceUpdatableKernelNode", 0))) __PYX_ERR(0, 12963, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_25deviceUpdatableKernelNode_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *)__pyx_v_deviceUpdatableKernelNode)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_25deviceUpdatableKernelNode_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct26 *__pyx_v_deviceUpdatableKernelNode) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":12964 * @deviceUpdatableKernelNode.setter * def deviceUpdatableKernelNode(self, deviceUpdatableKernelNode not None : anon_struct26): * string.memcpy(&self._pvt_ptr[0].deviceUpdatableKernelNode, deviceUpdatableKernelNode.getPtr(), sizeof(self._pvt_ptr[0].deviceUpdatableKernelNode)) # <<<<<<<<<<<<<< * @property * def sharedMemCarveout(self): */ __pyx_t_2 = ((PyObject *)__pyx_v_deviceUpdatableKernelNode); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); 
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12964, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12964, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).deviceUpdatableKernelNode), ((struct anon_struct26 *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).deviceUpdatableKernelNode)))); /* "cuda/bindings/runtime.pyx":12962 * def deviceUpdatableKernelNode(self): * return self._deviceUpdatableKernelNode * @deviceUpdatableKernelNode.setter # <<<<<<<<<<<<<< * def deviceUpdatableKernelNode(self, deviceUpdatableKernelNode not None : anon_struct26): * string.memcpy(&self._pvt_ptr[0].deviceUpdatableKernelNode, deviceUpdatableKernelNode.getPtr(), sizeof(self._pvt_ptr[0].deviceUpdatableKernelNode)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.deviceUpdatableKernelNode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12965 * def deviceUpdatableKernelNode(self, deviceUpdatableKernelNode not None : anon_struct26): * string.memcpy(&self._pvt_ptr[0].deviceUpdatableKernelNode, deviceUpdatableKernelNode.getPtr(), sizeof(self._pvt_ptr[0].deviceUpdatableKernelNode)) * @property # <<<<<<<<<<<<<< * def sharedMemCarveout(self): * return self._pvt_ptr[0].sharedMemCarveout */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17sharedMemCarveout_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17sharedMemCarveout_1__get__(PyObject 
*__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17sharedMemCarveout___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17sharedMemCarveout___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":12967 * @property * def sharedMemCarveout(self): * return self._pvt_ptr[0].sharedMemCarveout # <<<<<<<<<<<<<< * @sharedMemCarveout.setter * def sharedMemCarveout(self, unsigned int sharedMemCarveout): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_pvt_ptr[0]).sharedMemCarveout); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12967, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12965 * def deviceUpdatableKernelNode(self, deviceUpdatableKernelNode not None : anon_struct26): * string.memcpy(&self._pvt_ptr[0].deviceUpdatableKernelNode, deviceUpdatableKernelNode.getPtr(), sizeof(self._pvt_ptr[0].deviceUpdatableKernelNode)) * @property # <<<<<<<<<<<<<< * def sharedMemCarveout(self): * return self._pvt_ptr[0].sharedMemCarveout */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.sharedMemCarveout.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12968 * def sharedMemCarveout(self): * return self._pvt_ptr[0].sharedMemCarveout * @sharedMemCarveout.setter # <<<<<<<<<<<<<< * def sharedMemCarveout(self, unsigned int sharedMemCarveout): * self._pvt_ptr[0].sharedMemCarveout = sharedMemCarveout */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17sharedMemCarveout_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_sharedMemCarveout); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17sharedMemCarveout_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_sharedMemCarveout) { unsigned int __pyx_v_sharedMemCarveout; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_sharedMemCarveout); { __pyx_v_sharedMemCarveout = __Pyx_PyLong_As_unsigned_int(__pyx_arg_sharedMemCarveout); if (unlikely((__pyx_v_sharedMemCarveout == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 12969, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.sharedMemCarveout.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17sharedMemCarveout_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), ((unsigned int)__pyx_v_sharedMemCarveout)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_17sharedMemCarveout_2__set__(struct 
__pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, unsigned int __pyx_v_sharedMemCarveout) { int __pyx_r; /* "cuda/bindings/runtime.pyx":12970 * @sharedMemCarveout.setter * def sharedMemCarveout(self, unsigned int sharedMemCarveout): * self._pvt_ptr[0].sharedMemCarveout = sharedMemCarveout # <<<<<<<<<<<<<< * * cdef class cudaLaunchAttribute_st: */ (__pyx_v_self->_pvt_ptr[0]).sharedMemCarveout = __pyx_v_sharedMemCarveout; /* "cuda/bindings/runtime.pyx":12968 * def sharedMemCarveout(self): * return self._pvt_ptr[0].sharedMemCarveout * @sharedMemCarveout.setter # <<<<<<<<<<<<<< * def sharedMemCarveout(self, unsigned int sharedMemCarveout): * self._pvt_ptr[0].sharedMemCarveout = sharedMemCarveout */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10__reduce_cython__, "cudaLaunchAttributeValue.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const 
*__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); 
__PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_12__setstate_cython__, "cudaLaunchAttributeValue.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL 
CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t 
__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaLaunchAttributeValue_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttributeValue.__setstate_cython__", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12988 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12988, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12988, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 12988, __pyx_L3_error) } else { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12988, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12988, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 12988, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { 
/* NOTE(review): machine-generated by Cython from cuda/bindings/runtime.pyx (~lines 12988-12995). Do not hand-edit this C; change the .pyx and regenerate. Below: __cinit__ impl (points _pvt_ptr at the internal _pvt_val when _ptr==0, else adopts the caller-supplied address) and the __init__ Python wrapper (argument unpacking via Cython-internal macros). */
Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":12989 * """ * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":12990 * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":12989 * """ * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":12992 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * def __init__(self, void_ptr _ptr = 0): * pass */ /*else*/ { __pyx_v_self->_pvt_ptr = ((struct cudaLaunchAttribute_st *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":12988 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":12993 * else: * self._pvt_ptr = _ptr * def __init__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * pass * self._val = cudaLaunchAttributeValue(_ptr=&self._pvt_ptr[0].val) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { 
CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 12993, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12993, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 12993, __pyx_L3_error) } else { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 12993, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 12993, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 12993, __pyx_L3_error) 
__pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2__init__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "cuda/bindings/runtime.pyx":12995 * def __init__(self, void_ptr _ptr = 0): * pass * self._val = cudaLaunchAttributeValue(_ptr=&self._pvt_ptr[0].val) # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaLaunchAttributeValue); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaLaunchAttributeValue); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).val))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 12995, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, NULL}; __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 12995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 12995, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12995, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_val); __Pyx_DECREF((PyObject *)__pyx_v_self->_val); __pyx_v_self->_val = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":12993 * else: * self._pvt_ptr = _ptr * def __init__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * pass * self._val = cudaLaunchAttributeValue(_ptr=&self._pvt_ptr[0].val) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":12996 * pass * self._val = cudaLaunchAttributeValue(_ptr=&self._pvt_ptr[0].val) * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def getPtr(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_5__dealloc__(PyObject 
*__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_5__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_4__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":12998 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * def __repr__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_6getPtr, "cudaLaunchAttribute_st.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_6getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; 
#endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":12999 * pass * def getPtr(self): * return self._pvt_ptr # <<<<<<<<<<<<<< * def __repr__(self): * if self._pvt_ptr is not NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":12998 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * def __repr__(self): */ /* function exit code */ 
/* NOTE(review): Cython-generated. getPtr() above exposes the raw struct pointer to Python as an integer (void_ptr); the __repr__ wrapper and impl follow. */
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13000 * def getPtr(self): * return self._pvt_ptr * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_9__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_9__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self) { PyObject *__pyx_v_str_list = NULL; PyObject *__pyx_10genexpr176__pyx_v_line = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13001 * return self._pvt_ptr * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * 
try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13002 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += ['id : ' + str(self.id)] */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13002, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13003 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['id : ' + str(self.id)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13004 * str_list = [] * try: * str_list += ['id : ' + str(self.id)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['id : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_id_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13004, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13004, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_id, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13004, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13004, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13004, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13004, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; 
/* NOTE(review): Cython-generated __repr__ body. Each field is appended as "name : value"; a ValueError from the enum lookup (self.id) or nested repr (self.val) is caught below and a bare "name : " placeholder is appended instead. Exception state is saved/restored via __Pyx_ExceptionSave/Reset -- statement order is load-bearing; edit the .pyx, not this file. */
__Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13003 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['id : ' + str(self.id)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":13005 * try: * str_list += ['id : ' + str(self.id)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['id : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13005, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13006 * str_list += ['id : ' + str(self.id)] * except ValueError: * str_list += ['id : '] # <<<<<<<<<<<<<< * try: * str_list += ['val :\n' + '\n'.join([' ' + line for line in str(self.val).splitlines()])] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13006, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_id_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_id_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_id_ValueError) != (0)) __PYX_ERR(0, 13006, __pyx_L6_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13006, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; 
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":13003 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['id : ' + str(self.id)] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":13007 * except ValueError: * str_list += ['id : '] * try: # <<<<<<<<<<<<<< * str_list += ['val :\n' + '\n'.join([' ' + line for line in str(self.val).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13008 * str_list += ['id : '] * try: * str_list += ['val :\n' + '\n'.join([' ' + line for line in str(self.val).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['val : '] */ { /* enter inner scope */ __pyx_t_8 = PyList_New(0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13008, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_val_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13008, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13008, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyUnicode_Splitlines(((PyObject*)__pyx_t_2), 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13008, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_t_6; __Pyx_INCREF(__pyx_t_2); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13008, __pyx_L20_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_2, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13008, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_10genexpr176__pyx_v_line, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr176__pyx_v_line); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13008, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_8, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 13008, __pyx_L20_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_10genexpr176__pyx_v_line); __pyx_10genexpr176__pyx_v_line = 0; goto __pyx_L24_exit_scope; __pyx_L20_error:; __Pyx_XDECREF(__pyx_10genexpr176__pyx_v_line); __pyx_10genexpr176__pyx_v_line = 0; goto __pyx_L12_error; __pyx_L24_exit_scope:; } /* exit inner scope */ __pyx_t_2 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13008, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_val, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13008, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13008, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13008, __pyx_L12_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if 
(unlikely(!__pyx_t_8)) __PYX_ERR(0, 13008, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13007 * except ValueError: * str_list += ['id : '] * try: # <<<<<<<<<<<<<< * str_list += ['val :\n' + '\n'.join([' ' + line for line in str(self.val).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L17_try_end; __pyx_L12_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13009 * try: * str_list += ['val :\n' + '\n'.join([' ' + line for line in str(self.val).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['val : '] * return '\n'.join(str_list) */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 13009, __pyx_L14_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":13010 * str_list += ['val :\n' + '\n'.join([' ' + line for line in str(self.val).splitlines()])] * except ValueError: * str_list += ['val : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13010, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_val_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_val_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_val_ValueError) != (0)) 
return self._pvt_ptr * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_str_list); __Pyx_XDECREF(__pyx_10genexpr176__pyx_v_line); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13014 * else: * return '' * @property # <<<<<<<<<<<<<< * def id(self): * if self._pvt_ptr[0].id not in _dict_cudaLaunchAttributeID: */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2id_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2id_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2id___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2id___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13016 * @property * def id(self): * if 
self._pvt_ptr[0].id not in _dict_cudaLaunchAttributeID: # <<<<<<<<<<<<<< * return None * return _dict_cudaLaunchAttributeID[self._pvt_ptr[0].id] */ __pyx_t_1 = __Pyx_PyLong_From_enum__cudaLaunchAttributeID((__pyx_v_self->_pvt_ptr[0]).id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13016, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaLaunchAttributeID); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13016, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 13016, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "cuda/bindings/runtime.pyx":13017 * def id(self): * if self._pvt_ptr[0].id not in _dict_cudaLaunchAttributeID: * return None # <<<<<<<<<<<<<< * return _dict_cudaLaunchAttributeID[self._pvt_ptr[0].id] * @id.setter */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13016 * @property * def id(self): * if self._pvt_ptr[0].id not in _dict_cudaLaunchAttributeID: # <<<<<<<<<<<<<< * return None * return _dict_cudaLaunchAttributeID[self._pvt_ptr[0].id] */ } /* "cuda/bindings/runtime.pyx":13018 * if self._pvt_ptr[0].id not in _dict_cudaLaunchAttributeID: * return None * return _dict_cudaLaunchAttributeID[self._pvt_ptr[0].id] # <<<<<<<<<<<<<< * @id.setter * def id(self, id not None : cudaLaunchAttributeID): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaLaunchAttributeID); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyLong_From_enum__cudaLaunchAttributeID((__pyx_v_self->_pvt_ptr[0]).id); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(0, 13018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13014 * else: * return '' * @property # <<<<<<<<<<<<<< * def id(self): * if self._pvt_ptr[0].id not in _dict_cudaLaunchAttributeID: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.id.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13019 * return None * return _dict_cudaLaunchAttributeID[self._pvt_ptr[0].id] * @id.setter # <<<<<<<<<<<<<< * def id(self, id not None : cudaLaunchAttributeID): * self._pvt_ptr[0].id = id.value */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_id); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2id_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_id) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(((PyObject *)__pyx_v_id) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "id"); __PYX_ERR(0, 13020, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2id_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self), ((PyObject *)__pyx_v_id)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_2id_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self, PyObject *__pyx_v_id) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaLaunchAttributeID __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13021 * @id.setter * def id(self, id not None : cudaLaunchAttributeID): * self._pvt_ptr[0].id = id.value # <<<<<<<<<<<<<< * @property * def val(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_id, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13021, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaLaunchAttributeID)__Pyx_PyLong_As_enum__cudaLaunchAttributeID(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 13021, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (__pyx_v_self->_pvt_ptr[0]).id = __pyx_t_2; /* "cuda/bindings/runtime.pyx":13019 * return None * return _dict_cudaLaunchAttributeID[self._pvt_ptr[0].id] * @id.setter # <<<<<<<<<<<<<< * def id(self, id not None : cudaLaunchAttributeID): * self._pvt_ptr[0].id = id.value */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.id.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13022 * def id(self, id not None : cudaLaunchAttributeID): * self._pvt_ptr[0].id = id.value * @property # <<<<<<<<<<<<<< * def val(self): * return self._val */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3val_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3val_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3val___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3val___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13024 * @property * def val(self): * return self._val # <<<<<<<<<<<<<< * @val.setter * def val(self, val not None : cudaLaunchAttributeValue): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_val); __pyx_r = ((PyObject *)__pyx_v_self->_val); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13022 * def id(self, id not None : cudaLaunchAttributeID): * self._pvt_ptr[0].id = id.value * @property # <<<<<<<<<<<<<< * def val(self): * return self._val */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13025 * def val(self): * return self._val * @val.setter # <<<<<<<<<<<<<< * def val(self, val not None : cudaLaunchAttributeValue): * string.memcpy(&self._pvt_ptr[0].val, val.getPtr(), sizeof(self._pvt_ptr[0].val)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3val_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_val) { CYTHON_UNUSED PyObject 
/* NOTE(review): Machine-generated by Cython from cuda/bindings/runtime.pyx — do not
 * hand-edit; fix the .pyx and regenerate. This span finishes the Python wrapper for
 * `cudaLaunchAttribute_st.val.__set__` (type-checks the incoming value against the
 * cudaLaunchAttributeValue extension type) and contains its implementation: it calls
 * val.getPtr() (a Python-level method returning an address as a Python int), converts
 * that int back to a pointer via __Pyx_PyLong_As_unsigned_PY_LONG_LONG, and memcpy's
 * sizeof(self._pvt_ptr[0].val) bytes into the embedded union. The int->pointer
 * round-trip means a getPtr() returning a stale/bogus address is copied blindly —
 * that contract lives in the .pyx, not here. */
*const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_val), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaLaunchAttributeValue, 0, "val", 0))) __PYX_ERR(0, 13026, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3val_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *)__pyx_v_val)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_3val_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttributeValue *__pyx_v_val) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13027 * @val.setter * def val(self, val not None : cudaLaunchAttributeValue): * string.memcpy(&self._pvt_ptr[0].val, val.getPtr(), sizeof(self._pvt_ptr[0].val)) # <<<<<<<<<<<<<< * * cdef class anon_struct27: */ __pyx_t_2 = ((PyObject *)__pyx_v_val); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 
0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13027, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).val), ((union cudaLaunchAttributeValue *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).val)))); /* "cuda/bindings/runtime.pyx":13025 * def val(self): * return self._val * @val.setter # <<<<<<<<<<<<<< * def val(self, val not None : cudaLaunchAttributeValue): * string.memcpy(&self._pvt_ptr[0].val, val.getPtr(), sizeof(self._pvt_ptr[0].val)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.val.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_10__reduce_cython__, "cudaLaunchAttribute_st.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_11__reduce_cython__, 
/* NOTE(review): Cython-generated. `cudaLaunchAttribute_st.__reduce_cython__`:
 * fastcall wrapper rejects any positional or keyword arguments, then the impl
 * unconditionally raises TypeError ("no default __reduce__ due to non-trivial
 * __cinit__") — i.e. instances of this extension type are not picklable by
 * default. __Pyx_KwValues_FASTCALL's arguments are consumed by the macro, not
 * evaluated as plain C reads. Regenerate from the .pyx rather than editing. */
__Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject 
/* NOTE(review): Cython-generated. `cudaLaunchAttribute_st.__setstate_cython__`
 * fastcall wrapper: parses exactly one argument (`__pyx_state`, positional or
 * keyword) via __Pyx_ParseKeywords, raising a standard argtuple error on any
 * other arity; on the error path (__pyx_L3_error) it XDECREFs every slot in
 * `values[]` before adding the traceback. Do not hand-edit — regenerate. */
*__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_12__setstate_cython__, "cudaLaunchAttribute_st.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); 
/* NOTE(review): Cython-generated. Contains: (1) `__setstate_cython__` impl,
 * which unconditionally raises TypeError (unpicklable due to non-trivial
 * __cinit__); (2) `anon_struct27.__cinit__` wrapper + impl — takes one
 * `void_ptr _ptr` argument (a Python int converted via
 * __Pyx_PyLong_As_unsigned_PY_LONG_LONG) and stores it as
 * self->_pvt_ptr = (struct cudaAsyncNotificationInfo *)_ptr, i.e. the object
 * aliases caller-owned memory and takes no ownership (no free in __dealloc__,
 * see below); (3) the start of the `__init__` wrapper. Regenerate from the
 * .pyx instead of editing here. */
return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaLaunchAttribute_st_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaLaunchAttribute_st *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchAttribute_st.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13041 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * self._pvt_ptr = _ptr * */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int 
__pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13041, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13041, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13041, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, i); __PYX_ERR(0, 13041, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13041, __pyx_L3_error) } __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13041, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13041, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13042 * """ * def __cinit__(self, void_ptr _ptr): * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * * def __init__(self, void_ptr _ptr): */ __pyx_v_self->_pvt_ptr = ((struct cudaAsyncNotificationInfo *)__pyx_v__ptr); /* "cuda/bindings/runtime.pyx":13041 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * self._pvt_ptr = _ptr * */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13044 * self._pvt_ptr = _ptr * * def __init__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * pass * def __dealloc__(self): */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno 
/* NOTE(review): Cython-generated. Continues the `anon_struct27.__init__`
 * wrapper (same one-argument `_ptr` parsing as __cinit__; the impl is a
 * no-op `pass` — the pointer was already stored by __cinit__, which Python
 * guarantees ran first). Also contains `__dealloc__` (no-op: the object
 * does not own _pvt_ptr, so nothing is freed) and the start of the `getPtr`
 * wrapper. Do not hand-edit; regenerate from cuda/bindings/runtime.pyx. */
= 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13044, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13044, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 13044, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, i); __PYX_ERR(0, 13044, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13044, __pyx_L3_error) } __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13044, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13044, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); 
++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_2__init__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13046 * def __init__(self, void_ptr _ptr): * pass * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def getPtr(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_5__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_5__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_4__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":13048 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return 
/* NOTE(review): Cython-generated. `anon_struct27.getPtr`: fastcall wrapper
 * that rejects all arguments, plus impl that returns
 * &self->_pvt_ptr[0].info.overBudget as a Python int (address exposed via
 * __Pyx_PyLong_From_unsigned_PY_LONG_LONG). This is the integer-address
 * convention consumed by the memcpy-based setters elsewhere in this file;
 * the returned int is only valid while the underlying buffer lives.
 * Do not hand-edit; regenerate from the .pyx. */
&self._pvt_ptr[0].info.overBudget * def __repr__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_13anon_struct27_6getPtr, "anon_struct27.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13anon_struct27_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_13anon_struct27_6getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":13049 * pass * def getPtr(self): * return &self._pvt_ptr[0].info.overBudget # <<<<<<<<<<<<<< * def __repr__(self): * if self._pvt_ptr is not NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).info.overBudget))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13049, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13048 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return &self._pvt_ptr[0].info.overBudget * def __repr__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13050 * def getPtr(self): * return &self._pvt_ptr[0].info.overBudget * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_9__repr__(PyObject 
/* NOTE(review): Cython-generated. `anon_struct27.__repr__`: if _pvt_ptr is
 * non-NULL, builds a list with one 'bytesOverBudget : <value>' entry (the
 * attribute read goes through the Python-level property so a ValueError from
 * it is caught and replaced by a bare 'bytesOverBudget : ' entry — the
 * __Pyx_ExceptionSave/Reset pair implements that try/except), then joins with
 * '\n'; returns '' when _pvt_ptr is NULL. The dense __pyx_t_* refcount
 * choreography is order-sensitive — do not hand-edit; regenerate. */
*__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_9__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_self) { PyObject *__pyx_v_str_list = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13051 * return &self._pvt_ptr[0].info.overBudget * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13052 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += ['bytesOverBudget : ' + str(self.bytesOverBudget)] */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13052, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13053 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['bytesOverBudget : ' + str(self.bytesOverBudget)] * except ValueError: */ { 
__Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13054 * str_list = [] * try: * str_list += ['bytesOverBudget : ' + str(self.bytesOverBudget)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['bytesOverBudget : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_bytesOverBudget_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13054, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13054, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_bytesOverBudget, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13054, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13054, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13054, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13054, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13053 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['bytesOverBudget : ' + str(self.bytesOverBudget)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* 
"cuda/bindings/runtime.pyx":13055 * try: * str_list += ['bytesOverBudget : ' + str(self.bytesOverBudget)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['bytesOverBudget : '] * return '\n'.join(str_list) */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13055, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13056 * str_list += ['bytesOverBudget : ' + str(self.bytesOverBudget)] * except ValueError: * str_list += ['bytesOverBudget : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13056, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_bytesOverBudget_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_bytesOverBudget_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_bytesOverBudget_ValueError) != (0)) __PYX_ERR(0, 13056, __pyx_L6_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13056, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":13053 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['bytesOverBudget : ' + str(self.bytesOverBudget)] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); 
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":13057 * except ValueError: * str_list += ['bytesOverBudget : '] * return '\n'.join(str_list) # <<<<<<<<<<<<<< * else: * return '' */ __Pyx_XDECREF(__pyx_r); __pyx_t_8 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13057, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_r = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13051 * return &self._pvt_ptr[0].info.overBudget * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ } /* "cuda/bindings/runtime.pyx":13059 * return '\n'.join(str_list) * else: * return '' # <<<<<<<<<<<<<< * @property * def bytesOverBudget(self): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3); __pyx_r = __pyx_mstate_global->__pyx_kp_u__3; goto __pyx_L0; } /* "cuda/bindings/runtime.pyx":13050 * def getPtr(self): * return &self._pvt_ptr[0].info.overBudget * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_str_list); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13060 * else: * return '' * @property # <<<<<<<<<<<<<< * def bytesOverBudget(self): * return self._pvt_ptr[0].info.overBudget.bytesOverBudget */ /* Python wrapper */ static PyObject 
/* NOTE(review): Cython-generated. `anon_struct27.bytesOverBudget` property:
 * getter reads _pvt_ptr[0].info.overBudget.bytesOverBudget and boxes it as a
 * Python int; setter converts the argument with
 * __Pyx_PyLong_As_unsigned_PY_LONG_LONG (raising OverflowError/TypeError on
 * bad values before any write) and stores it back into the same field.
 * Neither accessor NULL-checks _pvt_ptr — the caller must have constructed
 * the object with a valid pointer (see __cinit__ above). The trailing
 * `__reduce_cython__` wrapper for anon_struct27 continues past this chunk.
 * Do not hand-edit; regenerate from cuda/bindings/runtime.pyx. */
*__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_15bytesOverBudget_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_15bytesOverBudget_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_15bytesOverBudget___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_15bytesOverBudget___get__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13062 * @property * def bytesOverBudget(self): * return self._pvt_ptr[0].info.overBudget.bytesOverBudget # <<<<<<<<<<<<<< * @bytesOverBudget.setter * def bytesOverBudget(self, unsigned long long bytesOverBudget): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG((__pyx_v_self->_pvt_ptr[0]).info.overBudget.bytesOverBudget); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13062, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13060 * else: * return '' * @property # <<<<<<<<<<<<<< * def bytesOverBudget(self): * return self._pvt_ptr[0].info.overBudget.bytesOverBudget */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.bytesOverBudget.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13063 * def bytesOverBudget(self): * return self._pvt_ptr[0].info.overBudget.bytesOverBudget * @bytesOverBudget.setter # <<<<<<<<<<<<<< * def bytesOverBudget(self, unsigned long long bytesOverBudget): * self._pvt_ptr[0].info.overBudget.bytesOverBudget = bytesOverBudget */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_15bytesOverBudget_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_bytesOverBudget); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_15bytesOverBudget_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_bytesOverBudget) { unsigned PY_LONG_LONG __pyx_v_bytesOverBudget; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_bytesOverBudget); { __pyx_v_bytesOverBudget = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_arg_bytesOverBudget); if (unlikely((__pyx_v_bytesOverBudget == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13064, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.bytesOverBudget.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_15bytesOverBudget_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_self), ((unsigned PY_LONG_LONG)__pyx_v_bytesOverBudget)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_15bytesOverBudget_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_self, unsigned 
PY_LONG_LONG __pyx_v_bytesOverBudget) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13065 * @bytesOverBudget.setter * def bytesOverBudget(self, unsigned long long bytesOverBudget): * self._pvt_ptr[0].info.overBudget.bytesOverBudget = bytesOverBudget # <<<<<<<<<<<<<< * * cdef class anon_union10: */ (__pyx_v_self->_pvt_ptr[0]).info.overBudget.bytesOverBudget = __pyx_v_bytesOverBudget; /* "cuda/bindings/runtime.pyx":13063 * def bytesOverBudget(self): * return self._pvt_ptr[0].info.overBudget.bytesOverBudget * @bytesOverBudget.setter # <<<<<<<<<<<<<< * def bytesOverBudget(self, unsigned long long bytesOverBudget): * self._pvt_ptr[0].info.overBudget.bytesOverBudget = bytesOverBudget */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_13anon_struct27_10__reduce_cython__, "anon_struct27.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13anon_struct27_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_13anon_struct27_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { 
#if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial 
__cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_13anon_struct27_12__setstate_cython__, "anon_struct27.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13anon_struct27_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_13anon_struct27_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13anon_struct27_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); 
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_13anon_struct27_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.anon_struct27.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13079 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * self._pvt_ptr = _ptr * */ /* Python wrapper */ static int 
__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union10_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13079, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13079, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13079, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, i); __PYX_ERR(0, 13079, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13079, __pyx_L3_error) } __pyx_v__ptr = 
__Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13079, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13079, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union10___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_12anon_union10___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13080 * """ * def __cinit__(self, void_ptr _ptr): * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * * def __init__(self, void_ptr _ptr): */ __pyx_v_self->_pvt_ptr = ((struct cudaAsyncNotificationInfo *)__pyx_v__ptr); /* "cuda/bindings/runtime.pyx":13079 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * self._pvt_ptr = _ptr * */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13082 * self._pvt_ptr = _ptr * * def __init__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * pass * self._overBudget = anon_struct27(_ptr=self._pvt_ptr) */ /* Python wrapper */ static int 
__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union10_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13082, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13082, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 13082, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, i); __PYX_ERR(0, 13082, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13082, __pyx_L3_error) } 
__pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13082, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13082, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_2__init__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "cuda/bindings/runtime.pyx":13084 * def __init__(self, void_ptr _ptr): * pass * self._overBudget = anon_struct27(_ptr=self._pvt_ptr) # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct27); __pyx_t_3 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct27); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, NULL}; __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 13084, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13084, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_overBudget); __Pyx_DECREF((PyObject *)__pyx_v_self->_overBudget); __pyx_v_self->_overBudget = ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":13082 * self._pvt_ptr = _ptr * * def __init__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * pass * self._overBudget = anon_struct27(_ptr=self._pvt_ptr) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13085 * pass * self._overBudget = 
anon_struct27(_ptr=self._pvt_ptr) * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def getPtr(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_12anon_union10_5__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_12anon_union10_5__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_4__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":13087 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return &self._pvt_ptr[0].info * def __repr__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_12anon_union10_6getPtr, "anon_union10.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_12anon_union10_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_12anon_union10_6getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject 
*__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union10_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":13088 * pass * def getPtr(self): * return &self._pvt_ptr[0].info # <<<<<<<<<<<<<< * def __repr__(self): * if self._pvt_ptr is not NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).info))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13088, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13087 * def __dealloc__(self): * pass * def getPtr(self): 
# <<<<<<<<<<<<<< * return &self._pvt_ptr[0].info * def __repr__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13089 * def getPtr(self): * return &self._pvt_ptr[0].info * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_9__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_9__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union10_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_self) { PyObject *__pyx_v_str_list = NULL; PyObject *__pyx_10genexpr177__pyx_v_line = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13090 * return &self._pvt_ptr[0].info * def __repr__(self): * if self._pvt_ptr is 
not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13091 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += ['overBudget :\n' + '\n'.join([' ' + line for line in str(self.overBudget).splitlines()])] */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13091, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13092 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['overBudget :\n' + '\n'.join([' ' + line for line in str(self.overBudget).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13093 * str_list = [] * try: * str_list += ['overBudget :\n' + '\n'.join([' ' + line for line in str(self.overBudget).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['overBudget : '] */ { /* enter inner scope */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13093, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_overBudget_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13093, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13093, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyUnicode_Splitlines(((PyObject*)__pyx_t_7), 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13093, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __pyx_t_6; __Pyx_INCREF(__pyx_t_7); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); 
__pyx_t_6 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_7); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13093, __pyx_L12_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_7, __pyx_t_8); ++__pyx_t_8; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13093, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_10genexpr177__pyx_v_line, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr177__pyx_v_line); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13093, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 13093, __pyx_L12_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_10genexpr177__pyx_v_line); __pyx_10genexpr177__pyx_v_line = 0; goto __pyx_L16_exit_scope; __pyx_L12_error:; __Pyx_XDECREF(__pyx_10genexpr177__pyx_v_line); __pyx_10genexpr177__pyx_v_line = 0; goto __pyx_L4_error; __pyx_L16_exit_scope:; } /* exit inner scope */ __pyx_t_7 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13093, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_overBudget, __pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13093, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyList_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13093, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13093, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13093, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; 
__Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13092 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['overBudget :\n' + '\n'.join([' ' + line for line in str(self.overBudget).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "cuda/bindings/runtime.pyx":13094 * try: * str_list += ['overBudget :\n' + '\n'.join([' ' + line for line in str(self.overBudget).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['overBudget : '] * return '\n'.join(str_list) */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_9) { __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_7, &__pyx_t_6) < 0) __PYX_ERR(0, 13094, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":13095 * str_list += ['overBudget :\n' + '\n'.join([' ' + line for line in str(self.overBudget).splitlines()])] * except ValueError: * str_list += ['overBudget : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13095, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_overBudget_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_overBudget_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_overBudget_ValueError) != (0)) __PYX_ERR(0, 13095, __pyx_L6_except_error); __pyx_t_11 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_11)) 
__PYX_ERR(0, 13095, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_11)); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":13092 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['overBudget :\n' + '\n'.join([' ' + line for line in str(self.overBudget).splitlines()])] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":13096 * except ValueError: * str_list += ['overBudget : '] * return '\n'.join(str_list) # <<<<<<<<<<<<<< * else: * return '' */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13096, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13090 * return &self._pvt_ptr[0].info * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ } /* "cuda/bindings/runtime.pyx":13098 * return '\n'.join(str_list) * else: * return '' # <<<<<<<<<<<<<< * @property * def overBudget(self): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3); __pyx_r = __pyx_mstate_global->__pyx_kp_u__3; goto __pyx_L0; } /* "cuda/bindings/runtime.pyx":13089 * def getPtr(self): * return &self._pvt_ptr[0].info * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: 
* str_list = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_str_list); __Pyx_XDECREF(__pyx_10genexpr177__pyx_v_line); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13099 * else: * return '' * @property # <<<<<<<<<<<<<< * def overBudget(self): * return self._overBudget */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_10overBudget_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_10overBudget_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_10overBudget___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union10_10overBudget___get__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13101 * @property * def overBudget(self): * return self._overBudget # <<<<<<<<<<<<<< * @overBudget.setter * def overBudget(self, overBudget not None : anon_struct27): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_overBudget); __pyx_r = ((PyObject *)__pyx_v_self->_overBudget); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13099 * else: * return '' * @property # 
<<<<<<<<<<<<<< * def overBudget(self): * return self._overBudget */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13102 * def overBudget(self): * return self._overBudget * @overBudget.setter # <<<<<<<<<<<<<< * def overBudget(self, overBudget not None : anon_struct27): * string.memcpy(&self._pvt_ptr[0].info.overBudget, overBudget.getPtr(), sizeof(self._pvt_ptr[0].info.overBudget)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union10_10overBudget_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_overBudget); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union10_10overBudget_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_overBudget) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_overBudget), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_struct27, 0, "overBudget", 0))) __PYX_ERR(0, 13103, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_10overBudget_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *)__pyx_v_overBudget)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_10overBudget_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_anon_struct27 *__pyx_v_overBudget) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject 
*__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13104 * @overBudget.setter * def overBudget(self, overBudget not None : anon_struct27): * string.memcpy(&self._pvt_ptr[0].info.overBudget, overBudget.getPtr(), sizeof(self._pvt_ptr[0].info.overBudget)) # <<<<<<<<<<<<<< * * cdef class cudaAsyncNotificationInfo: */ __pyx_t_2 = ((PyObject *)__pyx_v_overBudget); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13104, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13104, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).info.overBudget), ((struct anon_struct27 *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).info.overBudget)))); /* "cuda/bindings/runtime.pyx":13102 * def overBudget(self): * return self._overBudget * @overBudget.setter # <<<<<<<<<<<<<< * def overBudget(self, overBudget not None : anon_struct27): * string.memcpy(&self._pvt_ptr[0].info.overBudget, overBudget.getPtr(), sizeof(self._pvt_ptr[0].info.overBudget)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.overBudget.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 
* def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_12anon_union10_10__reduce_cython__, "anon_union10.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_12anon_union10_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_12anon_union10_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union10_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_12anon_union10_12__setstate_cython__, "anon_union10.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_12anon_union10_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_12anon_union10_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union10_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union10_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static 
PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union10_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.anon_union10.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13123 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._val_ptr = calloc(1, sizeof(cyruntime.cudaAsyncNotificationInfo)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const 
*__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13123, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13123, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13123, __pyx_L3_error) } else { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13123, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13123, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 13123, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; struct cudaAsyncNotificationInfo *__pyx_t_2; /* "cuda/bindings/runtime.pyx":13124 * """ * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._val_ptr = calloc(1, sizeof(cyruntime.cudaAsyncNotificationInfo)) * self._pvt_ptr = self._val_ptr */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13125 * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: * self._val_ptr = calloc(1, sizeof(cyruntime.cudaAsyncNotificationInfo)) # <<<<<<<<<<<<<< * self._pvt_ptr = self._val_ptr * else: */ __pyx_v_self->_val_ptr = ((struct cudaAsyncNotificationInfo *)calloc(1, (sizeof(struct cudaAsyncNotificationInfo)))); /* "cuda/bindings/runtime.pyx":13126 * if _ptr == 0: * self._val_ptr = calloc(1, sizeof(cyruntime.cudaAsyncNotificationInfo)) * self._pvt_ptr = self._val_ptr # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_t_2 = __pyx_v_self->_val_ptr; __pyx_v_self->_pvt_ptr = __pyx_t_2; /* "cuda/bindings/runtime.pyx":13124 * """ * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 
0: # <<<<<<<<<<<<<< * self._val_ptr = calloc(1, sizeof(cyruntime.cudaAsyncNotificationInfo)) * self._pvt_ptr = self._val_ptr */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13128 * self._pvt_ptr = self._val_ptr * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * def __init__(self, void_ptr _ptr = 0): * pass */ /*else*/ { __pyx_v_self->_pvt_ptr = ((struct cudaAsyncNotificationInfo *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13123 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._val_ptr = calloc(1, sizeof(cyruntime.cudaAsyncNotificationInfo)) */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13129 * else: * self._pvt_ptr = _ptr * def __init__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * pass * self._info = anon_union10(_ptr=self._pvt_ptr) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13129, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13129, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 13129, __pyx_L3_error) } else { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13129, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13129, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 13129, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); 
++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_2__init__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__init__", 0); /* "cuda/bindings/runtime.pyx":13131 * def __init__(self, void_ptr _ptr = 0): * pass * self._info = anon_union10(_ptr=self._pvt_ptr) # <<<<<<<<<<<<<< * def __dealloc__(self): * if self._val_ptr is not NULL: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_union10); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_union10); __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 
1 : 0)] = {__pyx_t_2, NULL}; __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 13131, __pyx_L1_error) __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13131, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __Pyx_GIVEREF((PyObject *)__pyx_t_1); __Pyx_GOTREF((PyObject *)__pyx_v_self->_info); __Pyx_DECREF((PyObject *)__pyx_v_self->_info); __pyx_v_self->_info = ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":13129 * else: * self._pvt_ptr = _ptr * def __init__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * pass * self._info = anon_union10(_ptr=self._pvt_ptr) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13132 * pass * self._info = anon_union10(_ptr=self._pvt_ptr) * def __dealloc__(self): # <<<<<<<<<<<<<< * if self._val_ptr is not NULL: * free(self._val_ptr) */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_5__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void 
/* NOTE(review): This file is machine-generated by Cython from
 * cuda/bindings/runtime.pyx. Do not hand-edit; regenerate from the .pyx
 * source. Comments below were added for review orientation only. */

/* Tail of the __dealloc__ Python wrapper for cudaAsyncNotificationInfo
 * (its `static void` return type is on an earlier line outside this view).
 * Forwards to the implementation function below. */
__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_5__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this wrapper;
   * presumably __Pyx_KwValues_VARARGS ignores its arguments in this
   * configuration — confirm against the Cython utility-code macros. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* __dealloc__ implementation: releases the heap-allocated C struct copy
 * (self._val_ptr) with free() if it is non-NULL. */
static void __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4__dealloc__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self) {
  int __pyx_t_1;

  /* "cuda/bindings/runtime.pyx":13133: if self._val_ptr is not NULL: */
  __pyx_t_1 = (__pyx_v_self->_val_ptr != NULL);
  if (__pyx_t_1) {
    /* "cuda/bindings/runtime.pyx":13134: free(self._val_ptr) */
    free(__pyx_v_self->_val_ptr);
  }

  /* function exit code */
}

/* "cuda/bindings/runtime.pyx":13135: def getPtr(self): return self._pvt_ptr */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_7getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_6getPtr, "cudaAsyncNotificationInfo.getPtr(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_6getPtr};

/* getPtr Python wrapper: rejects positional and keyword arguments, then
 * forwards to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_7getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("getPtr (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) {
    __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL;}
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* getPtr implementation: returns self._pvt_ptr as a Python int
 * (the pointer is converted through an unsigned 64-bit integer). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("getPtr", 0);

  /* "cuda/bindings/runtime.pyx":13136: return self._pvt_ptr */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13136, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13137: def __repr__(self): */
/* Python wrapper */
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_9__repr__(PyObject *__pyx_v_self); /*proto*/
/* __repr__ Python wrapper: forwards directly to the implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_9__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __repr__ implementation: when _pvt_ptr is non-NULL, builds a list of
 * "field : value" strings for the `type` and `info` properties (each wrapped
 * in try/except ValueError so an unrepresentable field degrades to an empty
 * value instead of failing repr) and joins them with newlines; otherwise
 * returns ''. The two try/except regions below are Cython's expanded
 * exception-state save/restore machinery. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self) {
  PyObject *__pyx_v_str_list = NULL;
  PyObject *__pyx_10genexpr178__pyx_v_line = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  Py_ssize_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/runtime.pyx":13138: if self._pvt_ptr is not NULL: */
  __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL);
  if (__pyx_t_1) {

    /* "cuda/bindings/runtime.pyx":13139: str_list = [] */
    __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13139, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_v_str_list = ((PyObject*)__pyx_t_2);
    __pyx_t_2 = 0;

    /* "cuda/bindings/runtime.pyx":13140: try: (guards the `type` field) */
    {
      __Pyx_PyThreadState_declare
      __Pyx_PyThreadState_assign
      __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
      __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5);
      /*try:*/ {

        /* "cuda/bindings/runtime.pyx":13141: str_list += ['type : ' + str(self.type)] */
        __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_type_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13141, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_2);
        __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13141, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_6);
        __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
        __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_type, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13141, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
        __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13141, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_6);
        __Pyx_GIVEREF(__pyx_t_2);
        if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13141, __pyx_L4_error);
        __pyx_t_2 = 0;
        __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13141, __pyx_L4_error)
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
        __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2));
        __pyx_t_2 = 0;
      }
      /* end of try body (runtime.pyx:13140) — discard saved exception state */
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      goto __pyx_L9_try_end;
      __pyx_L4_error:;
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;

      /* "cuda/bindings/runtime.pyx":13142: except ValueError: */
      __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError);
      if (__pyx_t_7) {
        __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
        if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13142, __pyx_L6_except_error)
        __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8);

        /* "cuda/bindings/runtime.pyx":13143: str_list += ['type : '] */
        __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13143, __pyx_L6_except_error)
        __Pyx_GOTREF(__pyx_t_9);
        __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_type_ValueError);
        __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_type_ValueError);
        if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_type_ValueError) != (0)) __PYX_ERR(0, 13143, __pyx_L6_except_error);
        __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13143, __pyx_L6_except_error)
        __Pyx_GOTREF(__pyx_t_10);
        __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
        __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10));
        __pyx_t_10 = 0;
        __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
        __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
        __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
        goto __pyx_L5_exception_handled;
      }
      goto __pyx_L6_except_error;

      /* unhandled exception: restore saved exception state and propagate */
      __pyx_L6_except_error:;
      __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5);
      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
      goto __pyx_L1_error;
      __pyx_L5_exception_handled:;
      __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5);
      __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
      __pyx_L9_try_end:;
    }

    /* "cuda/bindings/runtime.pyx":13144: try: (guards the `info` field) */
    {
      __Pyx_PyThreadState_declare
      __Pyx_PyThreadState_assign
      __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3);
      __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3);
      /*try:*/ {

        /* "cuda/bindings/runtime.pyx":13145:
         * str_list += ['info :\n' + '\n'.join([' ' + line for line in str(self.info).splitlines()])] */
        { /* enter inner scope (list comprehension) */
          __pyx_t_8 = PyList_New(0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13145, __pyx_L20_error)
          __Pyx_GOTREF(__pyx_t_8);
          __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_info_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13145, __pyx_L20_error)
          __Pyx_GOTREF(__pyx_t_6);
          __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13145, __pyx_L20_error)
          __Pyx_GOTREF(__pyx_t_2);
          __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
          __pyx_t_6 = PyUnicode_Splitlines(((PyObject*)__pyx_t_2), 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13145, __pyx_L20_error)
          __Pyx_GOTREF(__pyx_t_6);
          __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
          __pyx_t_2 = __pyx_t_6; __Pyx_INCREF(__pyx_t_2);
          __pyx_t_11 = 0;
          __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
          for (;;) {
            {
              Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2);
#if !CYTHON_ASSUME_SAFE_SIZE
              if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13145, __pyx_L20_error)
#endif
              if (__pyx_t_11 >= __pyx_temp) break;
            }
            __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_2, __pyx_t_11);
            ++__pyx_t_11;
            if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13145, __pyx_L20_error)
            __Pyx_GOTREF(__pyx_t_6);
            __Pyx_XDECREF_SET(__pyx_10genexpr178__pyx_v_line, __pyx_t_6);
            __pyx_t_6 = 0;
            /* prepend the indent string (kp_u__4) to each line */
            __pyx_t_6 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr178__pyx_v_line); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13145, __pyx_L20_error)
            __Pyx_GOTREF(__pyx_t_6);
            if (unlikely(__Pyx_ListComp_Append(__pyx_t_8, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 13145, __pyx_L20_error)
            __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
          }
          __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
          __Pyx_XDECREF(__pyx_10genexpr178__pyx_v_line); __pyx_10genexpr178__pyx_v_line = 0;
          goto __pyx_L24_exit_scope;
          __pyx_L20_error:;
          __Pyx_XDECREF(__pyx_10genexpr178__pyx_v_line); __pyx_10genexpr178__pyx_v_line = 0;
          goto __pyx_L12_error;
          __pyx_L24_exit_scope:;
        } /* exit inner scope */
        /* '\n'.join(...) then prepend 'info :\n' and append to str_list */
        __pyx_t_2 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13145, __pyx_L12_error)
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
        __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_info, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13145, __pyx_L12_error)
        __Pyx_GOTREF(__pyx_t_8);
        __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
        __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13145, __pyx_L12_error)
        __Pyx_GOTREF(__pyx_t_2);
        __Pyx_GIVEREF(__pyx_t_8);
        if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13145, __pyx_L12_error);
        __pyx_t_8 = 0;
        __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13145, __pyx_L12_error)
        __Pyx_GOTREF(__pyx_t_8);
        __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
        __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8));
        __pyx_t_8 = 0;
      }
      /* end of try body (runtime.pyx:13144) — discard saved exception state */
      __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
      __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      goto __pyx_L17_try_end;
      __pyx_L12_error:;
      __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
      __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
      __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
      __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
      __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;

      /* "cuda/bindings/runtime.pyx":13146: except ValueError: */
      __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError);
      if (__pyx_t_7) {
        __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
        if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 13146, __pyx_L14_except_error)
        __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6);

        /* "cuda/bindings/runtime.pyx":13147: str_list += ['info : '] */
        __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13147, __pyx_L14_except_error)
        __Pyx_GOTREF(__pyx_t_10);
        __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_info_ValueError);
        __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_info_ValueError);
        if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_info_ValueError) != (0)) __PYX_ERR(0, 13147, __pyx_L14_except_error);
        __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13147, __pyx_L14_except_error)
        __Pyx_GOTREF(__pyx_t_9);
        __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
        __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9));
        __pyx_t_9 = 0;
        __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
        __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
        __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
        goto __pyx_L13_exception_handled;
      }
      goto __pyx_L14_except_error;

      /* unhandled exception: restore saved exception state and propagate */
      __pyx_L14_except_error:;
      __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3);
      __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3);
      goto __pyx_L1_error;
      __pyx_L13_exception_handled:;
      __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3);
      __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3);
      __pyx_L17_try_end:;
    }

    /* "cuda/bindings/runtime.pyx":13148: return '\n'.join(str_list) */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_6 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13148, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    __pyx_r = __pyx_t_6;
    __pyx_t_6 = 0;
    goto __pyx_L0;
  }

  /* "cuda/bindings/runtime.pyx":13150: else: return '' */
  /*else*/ {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3);
    __pyx_r = __pyx_mstate_global->__pyx_kp_u__3;
    goto __pyx_L0;
  }

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_XDECREF(__pyx_t_10);
  /* error tail of __repr__ (function body starts on an earlier line) */
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_str_list);
  __Pyx_XDECREF(__pyx_10genexpr178__pyx_v_line);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13151: @property def type(self): */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4type_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4type_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4type___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `type` property getter: maps the C enum value at _pvt_ptr[0].type through
 * the module-level _dict_cudaAsyncNotificationType lookup table; returns
 * None when the raw value is not a known enum member. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4type___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/runtime.pyx":13153: if self._pvt_ptr[0].type not in _dict_cudaAsyncNotificationType: */
  __pyx_t_1 = __Pyx_PyLong_From_enum__cudaAsyncNotificationType_enum((__pyx_v_self->_pvt_ptr[0]).type);
  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13153, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaAsyncNotificationType); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13153, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 13153, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (__pyx_t_3) {

    /* "cuda/bindings/runtime.pyx":13154: return None */
    __Pyx_XDECREF(__pyx_r);
    __pyx_r = Py_None; __Pyx_INCREF(Py_None);
    goto __pyx_L0;
  }

  /* "cuda/bindings/runtime.pyx":13155: return _dict_cudaAsyncNotificationType[self._pvt_ptr[0].type] */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaAsyncNotificationType); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyLong_From_enum__cudaAsyncNotificationType_enum((__pyx_v_self->_pvt_ptr[0]).type); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13155, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_4;
  __pyx_t_4 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.type.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13156: @type.setter
 * def type(self, type not None : cudaAsyncNotificationType): */
/* Python wrapper: enforces the `not None` argument contract, then forwards. */
static int __pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_type); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4type_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_type) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  if (unlikely(((PyObject *)__pyx_v_type) == Py_None)) {
    PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "type"); __PYX_ERR(0, 13157, __pyx_L1_error)
  }
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4type_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self), ((PyObject *)__pyx_v_type));

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = -1;
  goto __pyx_L5_cleaned_up;
  __pyx_L0:;
  __pyx_L5_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `type` property setter: reads type.value, converts it to the C enum, and
 * stores it into _pvt_ptr[0].type. */
static int __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4type_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self, PyObject *__pyx_v_type) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  cudaAsyncNotificationType __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/runtime.pyx":13158: self._pvt_ptr[0].type = type.value */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_type, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13158, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = ((enum cudaAsyncNotificationType_enum)__Pyx_PyLong_As_enum__cudaAsyncNotificationType_enum(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 13158, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  (__pyx_v_self->_pvt_ptr[0]).type = __pyx_t_2;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.type.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13159: @property def info(self): return self._info */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4info_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4info_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4info___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `info` property getter: returns the cached anon_union10 wrapper object
 * stored in self._info (new reference). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4info___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);

  /* "cuda/bindings/runtime.pyx":13161: return self._info */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF((PyObject *)__pyx_v_self->_info);
  __pyx_r = ((PyObject *)__pyx_v_self->_info);
  goto __pyx_L0;

  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13162: @info.setter
 * def info(self, info not None : anon_union10): */
/* Python wrapper: type-checks the argument against anon_union10, then forwards. */
static int __pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_info); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4info_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_info) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_info), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_union10, 0, "info", 0))) __PYX_ERR(0, 13163, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4info_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *)__pyx_v_info));

  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = -1;
  goto __pyx_L5_cleaned_up;
  __pyx_L0:;
  __pyx_L5_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* `info` property setter: calls info.getPtr() (a Python-int-encoded pointer),
 * reinterprets it as a union anon_union10 pointer, and memcpy's the union
 * contents into _pvt_ptr[0].info.
 * NOTE(review): the pointer round-trips through a Python integer with no
 * validity check — callers must ensure info.getPtr() points at a live union
 * of the expected size. */
static int __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_4info_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_anon_union10 *__pyx_v_info) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);

  /* "cuda/bindings/runtime.pyx":13164:
   * string.memcpy(&self._pvt_ptr[0].info, info.getPtr(), sizeof(self._pvt_ptr[0].info)) */
  __pyx_t_2 = ((PyObject *)__pyx_v_info); __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13164, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13164, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).info), ((union anon_union10 *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).info))));

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.info.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1: def __reduce_cython__(self): (pickling disabled) */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_10__reduce_cython__, "cudaAsyncNotificationInfo.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_11__reduce_cython__ = {"__reduce_cython__",
(PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_10__reduce_cython__};

/* __reduce_cython__ Python wrapper: rejects all arguments, then forwards. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) {
    __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;}
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __reduce_cython__ implementation: always raises TypeError — this type has
 * a non-trivial __cinit__, so default pickling is disabled. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2: raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */
  __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(2, 2, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3: def __setstate_cython__(self, __pyx_state): */
/* Python wrapper */
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_13__setstate_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_12__setstate_cython__, "cudaAsyncNotificationInfo.__setstate_cython__(self, __pyx_state)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_12__setstate_cython__};
/* Python wrapper for cudaAsyncNotificationInfo.__setstate_cython__(__pyx_state).
 * Unpacks the single positional-or-keyword argument "__pyx_state" into
 * values[0], then delegates to the implementation below (which raises). */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_13__setstate_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0;
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    /* NOTE(review): "unlikely(__pyx_kwds_len) < 0" places the unlikely()
     * hint around the value rather than the comparison, so the condition is
     * !!(len) < 0 (always false).  The zero-argument wrappers in this file
     * write unlikely(len < 0) instead -- confirm against the Cython version
     * that generated this file before changing anything. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error)
      /* All required arguments must now be present. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error)
    }
    __pyx_v___pyx_state = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: drop every reference collected into values[]. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *)__pyx_v_self), __pyx_v___pyx_state);
  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __setstate_cython__: always raises TypeError (no default
 * pickle support because __cinit__ is non-trivial).  From "(tree fragment)":3-4. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_25cudaAsyncNotificationInfo_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncNotificationInfo *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);
  /* raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */
  __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(2, 4, __pyx_L1_error)
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaAsyncNotificationInfo.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13204 -- def __cinit__(self, void_ptr _ptr = 0):
 * Wrapper for cudaTextureDesc.__cinit__: accepts 0 or 1 positional/keyword
 * argument "_ptr" (an integer address), defaulting to 0. */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
#endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps the value, not the comparison -- see the
     * matching note on __setstate_cython__ above; confirm against the
     * generating Cython version. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13204, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13204, __pyx_L3_error) CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13204, __pyx_L3_error)
    } else {
      switch (__pyx_nargs) {
        case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13204, __pyx_L3_error) CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* _ptr was supplied: coerce to unsigned integer; otherwise default 0. */
    if (values[0]) {
      __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13204, __pyx_L3_error)
    } else {
      __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 13204, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), __pyx_v__ptr);
  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cudaTextureDesc.__cinit__ implementation ("cuda/bindings/runtime.pyx":13204-13208):
 * _ptr == 0  -> point _pvt_ptr at the embedded _pvt_val (owned storage);
 * _ptr != 0  -> treat _ptr as the address of an external struct cudaTextureDesc. */
static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) {
  int __pyx_r;
  int __pyx_t_1;
  /* if _ptr == 0: */
  __pyx_t_1 = (__pyx_v__ptr == 0);
  if (__pyx_t_1) {
    /* self._pvt_ptr = &self._pvt_val */
    __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val);
    goto __pyx_L3;
  }
  /*else*/ {
    /* self._pvt_ptr = _ptr  (caller-provided address; lifetime owned by caller) */
    __pyx_v_self->_pvt_ptr = ((struct cudaTextureDesc *)__pyx_v__ptr);
  }
  __pyx_L3:;
  /*
function exit code */
  __pyx_r = 0;
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13209 -- def __init__(self, void_ptr _ptr = 0): pass
 * Python wrapper for cudaTextureDesc.__init__: parses 0 or 1 "_ptr" argument
 * exactly like __cinit__ (the parsed value is unused; setup happened in
 * __cinit__). */
/* Python wrapper */
static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
#endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* NOTE(review): unlikely() wraps the value, not the comparison -- see the
     * note on __setstate_cython__ above. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13209, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13209, __pyx_L3_error) CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 13209, __pyx_L3_error)
    } else {
      switch (__pyx_nargs) {
        case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13209, __pyx_L3_error) CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* _ptr supplied -> coerce; absent -> default 0. */
    if (values[0]) {
      __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13209, __pyx_L3_error)
    } else {
      __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 13209, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), __pyx_v__ptr);
  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __init__ implementation: a no-op ("pass"); all setup is in __cinit__. */
static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_2__init__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) {
  int __pyx_r;
  /* function exit code */
  __pyx_r = 0;
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13211 -- def __dealloc__(self): pass */
/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_5__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_5__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   * this presumably compiles only because __Pyx_KwValues_VARARGS ignores its
   * arguments -- confirm the macro definition before touching this line. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* __dealloc__ implementation: a no-op ("pass"); nothing to release here. */
static void __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_4__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) {
  /* function exit code */
}

/* "cuda/bindings/runtime.pyx":13213 -- def getPtr(self): return self._pvt_ptr */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_7getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_15cudaTextureDesc_6getPtr, "cudaTextureDesc.getPtr(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_15cudaTextureDesc_7getPtr
= {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_15cudaTextureDesc_6getPtr};
/* Python wrapper for cudaTextureDesc.getPtr(): rejects every positional and
 * keyword argument, then calls the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_7getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("getPtr (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* getPtr takes no arguments beyond self. */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* getPtr implementation ("cuda/bindings/runtime.pyx":13213-13214):
 * "return self._pvt_ptr" -- converts the internal struct pointer to a
 * Python integer (memory address of the wrapped cudaTextureDesc). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("getPtr", 0);
  /* return self._pvt_ptr */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13214, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13215 -- def __repr__(self): ... */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_9__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_9__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_v_str_list = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13216 * return self._pvt_ptr * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13217 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += ['addressMode : ' + str(self.addressMode)] */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13218 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['addressMode : ' + str(self.addressMode)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign 
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13219 * str_list = [] * try: * str_list += ['addressMode : ' + str(self.addressMode)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['addressMode : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_addressMode_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13219, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13219, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_addressMode, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13219, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13219, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13219, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13219, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13218 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['addressMode : ' + str(self.addressMode)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":13220 * try: * str_list += ['addressMode : ' + str(self.addressMode)] * except 
ValueError: # <<<<<<<<<<<<<< * str_list += ['addressMode : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13220, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13221 * str_list += ['addressMode : ' + str(self.addressMode)] * except ValueError: * str_list += ['addressMode : '] # <<<<<<<<<<<<<< * try: * str_list += ['filterMode : ' + str(self.filterMode)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13221, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_addressMode_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_addressMode_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_addressMode_ValueError) != (0)) __PYX_ERR(0, 13221, __pyx_L6_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13221, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":13218 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['addressMode : ' + str(self.addressMode)] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); 
__Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":13222 * except ValueError: * str_list += ['addressMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['filterMode : ' + str(self.filterMode)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13223 * str_list += ['addressMode : '] * try: * str_list += ['filterMode : ' + str(self.filterMode)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['filterMode : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_filterMode_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13223, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13223, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_filterMode, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13223, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13223, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13223, __pyx_L12_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13223, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13222 * except ValueError: * str_list += ['addressMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['filterMode : ' + 
str(self.filterMode)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L17_try_end; __pyx_L12_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13224 * try: * str_list += ['filterMode : ' + str(self.filterMode)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['filterMode : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 13224, __pyx_L14_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":13225 * str_list += ['filterMode : ' + str(self.filterMode)] * except ValueError: * str_list += ['filterMode : '] # <<<<<<<<<<<<<< * try: * str_list += ['readMode : ' + str(self.readMode)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13225, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_filterMode_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_filterMode_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_filterMode_ValueError) != (0)) __PYX_ERR(0, 13225, __pyx_L14_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13225, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; 
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L13_exception_handled; } goto __pyx_L14_except_error; /* "cuda/bindings/runtime.pyx":13222 * except ValueError: * str_list += ['addressMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['filterMode : ' + str(self.filterMode)] * except ValueError: */ __pyx_L14_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L13_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L17_try_end:; } /* "cuda/bindings/runtime.pyx":13226 * except ValueError: * str_list += ['filterMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['readMode : ' + str(self.readMode)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13227 * str_list += ['filterMode : '] * try: * str_list += ['readMode : ' + str(self.readMode)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['readMode : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_readMode_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13227, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13227, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_readMode, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13227, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13227, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if 
(__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13227, __pyx_L20_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13227, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13226 * except ValueError: * str_list += ['filterMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['readMode : ' + str(self.readMode)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L25_try_end; __pyx_L20_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13228 * try: * str_list += ['readMode : ' + str(self.readMode)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['readMode : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13228, __pyx_L22_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13229 * str_list += ['readMode : ' + str(self.readMode)] * except ValueError: * str_list += ['readMode : '] # <<<<<<<<<<<<<< * try: * str_list += ['sRGB : ' + str(self.sRGB)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13229, __pyx_L22_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_readMode_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_readMode_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 
0, __pyx_mstate_global->__pyx_kp_u_readMode_ValueError) != (0)) __PYX_ERR(0, 13229, __pyx_L22_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13229, __pyx_L22_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L21_exception_handled; } goto __pyx_L22_except_error; /* "cuda/bindings/runtime.pyx":13226 * except ValueError: * str_list += ['filterMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['readMode : ' + str(self.readMode)] * except ValueError: */ __pyx_L22_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L21_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L25_try_end:; } /* "cuda/bindings/runtime.pyx":13230 * except ValueError: * str_list += ['readMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['sRGB : ' + str(self.sRGB)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13231 * str_list += ['readMode : '] * try: * str_list += ['sRGB : ' + str(self.sRGB)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['sRGB : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_sRGB_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13231, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13231, 
__pyx_L28_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_sRGB, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13231, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13231, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13231, __pyx_L28_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13231, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13230 * except ValueError: * str_list += ['readMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['sRGB : ' + str(self.sRGB)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L33_try_end; __pyx_L28_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13232 * try: * str_list += ['sRGB : ' + str(self.sRGB)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['sRGB : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 13232, __pyx_L30_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":13233 * str_list += ['sRGB : ' + str(self.sRGB)] * 
except ValueError: * str_list += ['sRGB : '] # <<<<<<<<<<<<<< * try: * str_list += ['borderColor : ' + str(self.borderColor)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13233, __pyx_L30_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_sRGB_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_sRGB_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_sRGB_ValueError) != (0)) __PYX_ERR(0, 13233, __pyx_L30_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13233, __pyx_L30_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L29_exception_handled; } goto __pyx_L30_except_error; /* "cuda/bindings/runtime.pyx":13230 * except ValueError: * str_list += ['readMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['sRGB : ' + str(self.sRGB)] * except ValueError: */ __pyx_L30_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L29_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L33_try_end:; } /* "cuda/bindings/runtime.pyx":13234 * except ValueError: * str_list += ['sRGB : '] * try: # <<<<<<<<<<<<<< * str_list += ['borderColor : ' + str(self.borderColor)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13235 * str_list += ['sRGB : '] * try: * 
str_list += ['borderColor : ' + str(self.borderColor)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['borderColor : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_borderColor_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13235, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13235, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_borderColor, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13235, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13235, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13235, __pyx_L36_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13235, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13234 * except ValueError: * str_list += ['sRGB : '] * try: # <<<<<<<<<<<<<< * str_list += ['borderColor : ' + str(self.borderColor)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L41_try_end; __pyx_L36_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13236 * try: * str_list += ['borderColor : ' + str(self.borderColor)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['borderColor : '] * try: */ __pyx_t_7 = 
__Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13236, __pyx_L38_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13237 * str_list += ['borderColor : ' + str(self.borderColor)] * except ValueError: * str_list += ['borderColor : '] # <<<<<<<<<<<<<< * try: * str_list += ['normalizedCoords : ' + str(self.normalizedCoords)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13237, __pyx_L38_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_borderColor_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_borderColor_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_borderColor_ValueError) != (0)) __PYX_ERR(0, 13237, __pyx_L38_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13237, __pyx_L38_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L37_exception_handled; } goto __pyx_L38_except_error; /* "cuda/bindings/runtime.pyx":13234 * except ValueError: * str_list += ['sRGB : '] * try: # <<<<<<<<<<<<<< * str_list += ['borderColor : ' + str(self.borderColor)] * except ValueError: */ __pyx_L38_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L37_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); 
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L41_try_end:; } /* "cuda/bindings/runtime.pyx":13238 * except ValueError: * str_list += ['borderColor : '] * try: # <<<<<<<<<<<<<< * str_list += ['normalizedCoords : ' + str(self.normalizedCoords)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13239 * str_list += ['borderColor : '] * try: * str_list += ['normalizedCoords : ' + str(self.normalizedCoords)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['normalizedCoords : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_normalizedCoords_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13239, __pyx_L44_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13239, __pyx_L44_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_normalizedCoords, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13239, __pyx_L44_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13239, __pyx_L44_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13239, __pyx_L44_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13239, __pyx_L44_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13238 * except ValueError: * str_list += ['borderColor : '] * try: # <<<<<<<<<<<<<< * str_list += ['normalizedCoords : ' + 
str(self.normalizedCoords)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L49_try_end; __pyx_L44_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13240 * try: * str_list += ['normalizedCoords : ' + str(self.normalizedCoords)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['normalizedCoords : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 13240, __pyx_L46_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":13241 * str_list += ['normalizedCoords : ' + str(self.normalizedCoords)] * except ValueError: * str_list += ['normalizedCoords : '] # <<<<<<<<<<<<<< * try: * str_list += ['maxAnisotropy : ' + str(self.maxAnisotropy)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13241, __pyx_L46_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_normalizedCoords_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_normalizedCoords_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_normalizedCoords_ValueError) != (0)) __PYX_ERR(0, 13241, __pyx_L46_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13241, __pyx_L46_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); 
__pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L45_exception_handled; } goto __pyx_L46_except_error; /* "cuda/bindings/runtime.pyx":13238 * except ValueError: * str_list += ['borderColor : '] * try: # <<<<<<<<<<<<<< * str_list += ['normalizedCoords : ' + str(self.normalizedCoords)] * except ValueError: */ __pyx_L46_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L45_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L49_try_end:; } /* "cuda/bindings/runtime.pyx":13242 * except ValueError: * str_list += ['normalizedCoords : '] * try: # <<<<<<<<<<<<<< * str_list += ['maxAnisotropy : ' + str(self.maxAnisotropy)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13243 * str_list += ['normalizedCoords : '] * try: * str_list += ['maxAnisotropy : ' + str(self.maxAnisotropy)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['maxAnisotropy : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_maxAnisotropy_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13243, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13243, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_maxAnisotropy, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13243, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(0, 13243, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13243, __pyx_L52_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13243, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13242 * except ValueError: * str_list += ['normalizedCoords : '] * try: # <<<<<<<<<<<<<< * str_list += ['maxAnisotropy : ' + str(self.maxAnisotropy)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L57_try_end; __pyx_L52_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13244 * try: * str_list += ['maxAnisotropy : ' + str(self.maxAnisotropy)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['maxAnisotropy : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13244, __pyx_L54_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13245 * str_list += ['maxAnisotropy : ' + str(self.maxAnisotropy)] * except ValueError: * str_list += ['maxAnisotropy : '] # <<<<<<<<<<<<<< * try: * str_list += ['mipmapFilterMode : ' + str(self.mipmapFilterMode)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13245, __pyx_L54_except_error) 
__Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_maxAnisotropy_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_maxAnisotropy_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_maxAnisotropy_ValueError) != (0)) __PYX_ERR(0, 13245, __pyx_L54_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13245, __pyx_L54_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L53_exception_handled; } goto __pyx_L54_except_error; /* "cuda/bindings/runtime.pyx":13242 * except ValueError: * str_list += ['normalizedCoords : '] * try: # <<<<<<<<<<<<<< * str_list += ['maxAnisotropy : ' + str(self.maxAnisotropy)] * except ValueError: */ __pyx_L54_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L53_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L57_try_end:; } /* "cuda/bindings/runtime.pyx":13246 * except ValueError: * str_list += ['maxAnisotropy : '] * try: # <<<<<<<<<<<<<< * str_list += ['mipmapFilterMode : ' + str(self.mipmapFilterMode)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13247 * str_list += ['maxAnisotropy : '] * try: * str_list += ['mipmapFilterMode : ' + str(self.mipmapFilterMode)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['mipmapFilterMode : '] */ 
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_mipmapFilterMode_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13247, __pyx_L60_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13247, __pyx_L60_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_mipmapFilterMode, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13247, __pyx_L60_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13247, __pyx_L60_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13247, __pyx_L60_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13247, __pyx_L60_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13246 * except ValueError: * str_list += ['maxAnisotropy : '] * try: # <<<<<<<<<<<<<< * str_list += ['mipmapFilterMode : ' + str(self.mipmapFilterMode)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L65_try_end; __pyx_L60_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13248 * try: * str_list += ['mipmapFilterMode : ' + str(self.mipmapFilterMode)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['mipmapFilterMode : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { 
__Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 13248, __pyx_L62_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":13249 * str_list += ['mipmapFilterMode : ' + str(self.mipmapFilterMode)] * except ValueError: * str_list += ['mipmapFilterMode : '] # <<<<<<<<<<<<<< * try: * str_list += ['mipmapLevelBias : ' + str(self.mipmapLevelBias)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13249, __pyx_L62_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_mipmapFilterMode_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_mipmapFilterMode_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_mipmapFilterMode_ValueError) != (0)) __PYX_ERR(0, 13249, __pyx_L62_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13249, __pyx_L62_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L61_exception_handled; } goto __pyx_L62_except_error; /* "cuda/bindings/runtime.pyx":13246 * except ValueError: * str_list += ['maxAnisotropy : '] * try: # <<<<<<<<<<<<<< * str_list += ['mipmapFilterMode : ' + str(self.mipmapFilterMode)] * except ValueError: */ __pyx_L62_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L61_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, 
__pyx_t_3); __pyx_L65_try_end:; } /* "cuda/bindings/runtime.pyx":13250 * except ValueError: * str_list += ['mipmapFilterMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['mipmapLevelBias : ' + str(self.mipmapLevelBias)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13251 * str_list += ['mipmapFilterMode : '] * try: * str_list += ['mipmapLevelBias : ' + str(self.mipmapLevelBias)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['mipmapLevelBias : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_mipmapLevelBias_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13251, __pyx_L68_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13251, __pyx_L68_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_mipmapLevelBias, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13251, __pyx_L68_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13251, __pyx_L68_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13251, __pyx_L68_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13251, __pyx_L68_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13250 * except ValueError: * str_list += ['mipmapFilterMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['mipmapLevelBias : ' + str(self.mipmapLevelBias)] * except ValueError: 
*/ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L73_try_end; __pyx_L68_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13252 * try: * str_list += ['mipmapLevelBias : ' + str(self.mipmapLevelBias)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['mipmapLevelBias : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13252, __pyx_L70_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13253 * str_list += ['mipmapLevelBias : ' + str(self.mipmapLevelBias)] * except ValueError: * str_list += ['mipmapLevelBias : '] # <<<<<<<<<<<<<< * try: * str_list += ['minMipmapLevelClamp : ' + str(self.minMipmapLevelClamp)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13253, __pyx_L70_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_mipmapLevelBias_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_mipmapLevelBias_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_mipmapLevelBias_ValueError) != (0)) __PYX_ERR(0, 13253, __pyx_L70_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13253, __pyx_L70_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; 
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L69_exception_handled; } goto __pyx_L70_except_error; /* "cuda/bindings/runtime.pyx":13250 * except ValueError: * str_list += ['mipmapFilterMode : '] * try: # <<<<<<<<<<<<<< * str_list += ['mipmapLevelBias : ' + str(self.mipmapLevelBias)] * except ValueError: */ __pyx_L70_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L69_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L73_try_end:; } /* "cuda/bindings/runtime.pyx":13254 * except ValueError: * str_list += ['mipmapLevelBias : '] * try: # <<<<<<<<<<<<<< * str_list += ['minMipmapLevelClamp : ' + str(self.minMipmapLevelClamp)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13255 * str_list += ['mipmapLevelBias : '] * try: * str_list += ['minMipmapLevelClamp : ' + str(self.minMipmapLevelClamp)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['minMipmapLevelClamp : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_minMipmapLevelClamp_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13255, __pyx_L76_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13255, __pyx_L76_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_minMipmapLevelClamp, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13255, __pyx_L76_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(0, 13255, __pyx_L76_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13255, __pyx_L76_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13255, __pyx_L76_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13254 * except ValueError: * str_list += ['mipmapLevelBias : '] * try: # <<<<<<<<<<<<<< * str_list += ['minMipmapLevelClamp : ' + str(self.minMipmapLevelClamp)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L81_try_end; __pyx_L76_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13256 * try: * str_list += ['minMipmapLevelClamp : ' + str(self.minMipmapLevelClamp)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['minMipmapLevelClamp : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 13256, __pyx_L78_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":13257 * str_list += ['minMipmapLevelClamp : ' + str(self.minMipmapLevelClamp)] * except ValueError: * str_list += ['minMipmapLevelClamp : '] # <<<<<<<<<<<<<< * try: * str_list += ['maxMipmapLevelClamp : ' + str(self.maxMipmapLevelClamp)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) 
__PYX_ERR(0, 13257, __pyx_L78_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_minMipmapLevelClamp_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_minMipmapLevelClamp_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_minMipmapLevelClamp_ValueError) != (0)) __PYX_ERR(0, 13257, __pyx_L78_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13257, __pyx_L78_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L77_exception_handled; } goto __pyx_L78_except_error; /* "cuda/bindings/runtime.pyx":13254 * except ValueError: * str_list += ['mipmapLevelBias : '] * try: # <<<<<<<<<<<<<< * str_list += ['minMipmapLevelClamp : ' + str(self.minMipmapLevelClamp)] * except ValueError: */ __pyx_L78_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L77_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L81_try_end:; } /* "cuda/bindings/runtime.pyx":13258 * except ValueError: * str_list += ['minMipmapLevelClamp : '] * try: # <<<<<<<<<<<<<< * str_list += ['maxMipmapLevelClamp : ' + str(self.maxMipmapLevelClamp)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13259 * str_list += ['minMipmapLevelClamp : '] * try: * str_list += ['maxMipmapLevelClamp : ' + 
str(self.maxMipmapLevelClamp)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['maxMipmapLevelClamp : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_maxMipmapLevelClamp_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13259, __pyx_L84_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13259, __pyx_L84_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_maxMipmapLevelClamp, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13259, __pyx_L84_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13259, __pyx_L84_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13259, __pyx_L84_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13259, __pyx_L84_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13258 * except ValueError: * str_list += ['minMipmapLevelClamp : '] * try: # <<<<<<<<<<<<<< * str_list += ['maxMipmapLevelClamp : ' + str(self.maxMipmapLevelClamp)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L89_try_end; __pyx_L84_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13260 * try: * str_list += ['maxMipmapLevelClamp : ' + str(self.maxMipmapLevelClamp)] * except ValueError: # <<<<<<<<<<<<<< * 
str_list += ['maxMipmapLevelClamp : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13260, __pyx_L86_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13261 * str_list += ['maxMipmapLevelClamp : ' + str(self.maxMipmapLevelClamp)] * except ValueError: * str_list += ['maxMipmapLevelClamp : '] # <<<<<<<<<<<<<< * try: * str_list += ['disableTrilinearOptimization : ' + str(self.disableTrilinearOptimization)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13261, __pyx_L86_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_maxMipmapLevelClamp_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_maxMipmapLevelClamp_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_maxMipmapLevelClamp_ValueError) != (0)) __PYX_ERR(0, 13261, __pyx_L86_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13261, __pyx_L86_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L85_exception_handled; } goto __pyx_L86_except_error; /* "cuda/bindings/runtime.pyx":13258 * except ValueError: * str_list += ['minMipmapLevelClamp : '] * try: # <<<<<<<<<<<<<< * str_list += ['maxMipmapLevelClamp : ' + str(self.maxMipmapLevelClamp)] * except ValueError: */ __pyx_L86_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, 
__pyx_t_5); goto __pyx_L1_error; __pyx_L85_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L89_try_end:; } /* "cuda/bindings/runtime.pyx":13262 * except ValueError: * str_list += ['maxMipmapLevelClamp : '] * try: # <<<<<<<<<<<<<< * str_list += ['disableTrilinearOptimization : ' + str(self.disableTrilinearOptimization)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13263 * str_list += ['maxMipmapLevelClamp : '] * try: * str_list += ['disableTrilinearOptimization : ' + str(self.disableTrilinearOptimization)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['disableTrilinearOptimization : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_disableTrilinearOptimization_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13263, __pyx_L92_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13263, __pyx_L92_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_disableTrilinearOptimization, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13263, __pyx_L92_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13263, __pyx_L92_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13263, __pyx_L92_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13263, __pyx_L92_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 
0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13262 * except ValueError: * str_list += ['maxMipmapLevelClamp : '] * try: # <<<<<<<<<<<<<< * str_list += ['disableTrilinearOptimization : ' + str(self.disableTrilinearOptimization)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L97_try_end; __pyx_L92_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13264 * try: * str_list += ['disableTrilinearOptimization : ' + str(self.disableTrilinearOptimization)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['disableTrilinearOptimization : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 13264, __pyx_L94_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":13265 * str_list += ['disableTrilinearOptimization : ' + str(self.disableTrilinearOptimization)] * except ValueError: * str_list += ['disableTrilinearOptimization : '] # <<<<<<<<<<<<<< * try: * str_list += ['seamlessCubemap : ' + str(self.seamlessCubemap)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13265, __pyx_L94_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_disableTrilinearOptimization_Val); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_disableTrilinearOptimization_Val); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_disableTrilinearOptimization_Val) != 
(0)) __PYX_ERR(0, 13265, __pyx_L94_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13265, __pyx_L94_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L93_exception_handled; } goto __pyx_L94_except_error; /* "cuda/bindings/runtime.pyx":13262 * except ValueError: * str_list += ['maxMipmapLevelClamp : '] * try: # <<<<<<<<<<<<<< * str_list += ['disableTrilinearOptimization : ' + str(self.disableTrilinearOptimization)] * except ValueError: */ __pyx_L94_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L93_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L97_try_end:; } /* "cuda/bindings/runtime.pyx":13266 * except ValueError: * str_list += ['disableTrilinearOptimization : '] * try: # <<<<<<<<<<<<<< * str_list += ['seamlessCubemap : ' + str(self.seamlessCubemap)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13267 * str_list += ['disableTrilinearOptimization : '] * try: * str_list += ['seamlessCubemap : ' + str(self.seamlessCubemap)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['seamlessCubemap : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_seamlessCubemap_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13267, __pyx_L100_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = 
__Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13267, __pyx_L100_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_seamlessCubemap, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13267, __pyx_L100_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13267, __pyx_L100_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13267, __pyx_L100_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13267, __pyx_L100_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13266 * except ValueError: * str_list += ['disableTrilinearOptimization : '] * try: # <<<<<<<<<<<<<< * str_list += ['seamlessCubemap : ' + str(self.seamlessCubemap)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L105_try_end; __pyx_L100_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13268 * try: * str_list += ['seamlessCubemap : ' + str(self.seamlessCubemap)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['seamlessCubemap : '] * return '\n'.join(str_list) */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 
13268, __pyx_L102_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13269 * str_list += ['seamlessCubemap : ' + str(self.seamlessCubemap)] * except ValueError: * str_list += ['seamlessCubemap : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13269, __pyx_L102_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_seamlessCubemap_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_seamlessCubemap_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_seamlessCubemap_ValueError) != (0)) __PYX_ERR(0, 13269, __pyx_L102_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13269, __pyx_L102_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L101_exception_handled; } goto __pyx_L102_except_error; /* "cuda/bindings/runtime.pyx":13266 * except ValueError: * str_list += ['disableTrilinearOptimization : '] * try: # <<<<<<<<<<<<<< * str_list += ['seamlessCubemap : ' + str(self.seamlessCubemap)] * except ValueError: */ __pyx_L102_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L101_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L105_try_end:; } /* "cuda/bindings/runtime.pyx":13270 * except ValueError: * str_list += ['seamlessCubemap : '] * return '\n'.join(str_list) # <<<<<<<<<<<<<< * else: * return '' */ 
/* NOTE(review): Cython-generated C (from "cuda/bindings/runtime.pyx"). Do not hand-edit;
 * regenerate from the .pyx source instead. Comments below are review annotations only. */
/* Tail of cudaTextureDesc.__repr__: joins str_list with '\n' and returns it. */
__Pyx_XDECREF(__pyx_r); __pyx_t_8 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13270, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_r = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13216 * return self._pvt_ptr * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ } /* "cuda/bindings/runtime.pyx":13272 * return '\n'.join(str_list) * else: * return '' # <<<<<<<<<<<<<< * @property * def addressMode(self): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3); __pyx_r = __pyx_mstate_global->__pyx_kp_u__3; goto __pyx_L0; } /* "cuda/bindings/runtime.pyx":13215 * def getPtr(self): * return self._pvt_ptr * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_str_list); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13273 * else: * return '' * @property # <<<<<<<<<<<<<< * def addressMode(self): * return [_dict_cudaTextureAddressMode[_x] if _x in _dict_cudaTextureAddressMode else None for _x in list(self._pvt_ptr[0].addressMode)] */ /* Python wrapper */
/* Python-level wrapper for the cudaTextureDesc.addressMode property getter: downcasts
 * the object to its C struct and forwards to the _pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11addressMode_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11addressMode_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = 
__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_11addressMode___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* addressMode.__get__ implementation: converts the 3-element C array
 * _pvt_ptr[0].addressMode to a Python list, mapping each raw enum value through the
 * module-global _dict_cudaTextureAddressMode (None when the value has no mapping). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_11addressMode___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_10genexpr179__pyx_v__x = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13275 * @property * def addressMode(self): * return [_dict_cudaTextureAddressMode[_x] if _x in _dict_cudaTextureAddressMode else None for _x in list(self._pvt_ptr[0].addressMode)] # <<<<<<<<<<<<<< * @addressMode.setter * def addressMode(self, addressMode): */ __Pyx_XDECREF(__pyx_r); { /* enter inner scope */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13275, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_carray_to_py_enum__cudaTextureAddressMode((__pyx_v_self->_pvt_ptr[0]).addressMode, 3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13275, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PySequence_ListKeepNew(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13275, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_t_3; __Pyx_INCREF(__pyx_t_2); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13275, __pyx_L5_error)
#endif
if (__pyx_t_4 >= __pyx_temp) break; } 
__pyx_t_3 = __Pyx_PyList_GetItemRef(__pyx_t_2, __pyx_t_4); ++__pyx_t_4; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13275, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_10genexpr179__pyx_v__x, __pyx_t_3); __pyx_t_3 = 0; __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaTextureAddressMode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13275, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = (__Pyx_PySequence_ContainsTF(__pyx_10genexpr179__pyx_v__x, __pyx_t_5, Py_EQ)); if (unlikely((__pyx_t_6 < 0))) __PYX_ERR(0, 13275, __pyx_L5_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_6) { __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaTextureAddressMode); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13275, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_10genexpr179__pyx_v__x); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13275, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; } else { __Pyx_INCREF(Py_None); __pyx_t_3 = Py_None; } if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_3))) __PYX_ERR(0, 13275, __pyx_L5_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_10genexpr179__pyx_v__x); __pyx_10genexpr179__pyx_v__x = 0; goto __pyx_L9_exit_scope; __pyx_L5_error:; __Pyx_XDECREF(__pyx_10genexpr179__pyx_v__x); __pyx_10genexpr179__pyx_v__x = 0; goto __pyx_L1_error; __pyx_L9_exit_scope:; } /* exit inner scope */ __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13273 * else: * return '' * @property # <<<<<<<<<<<<<< * def addressMode(self): * return [_dict_cudaTextureAddressMode[_x] if _x in _dict_cudaTextureAddressMode else None for _x in list(self._pvt_ptr[0].addressMode)] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); 
/* NOTE(review): Cython-generated C; do not hand-edit -- regenerate from the .pyx.
 * Error-path cleanup of addressMode.__get__ continues below. */
__Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.addressMode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_10genexpr179__pyx_v__x); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13276 * def addressMode(self): * return [_dict_cudaTextureAddressMode[_x] if _x in _dict_cudaTextureAddressMode else None for _x in list(self._pvt_ptr[0].addressMode)] * @addressMode.setter # <<<<<<<<<<<<<< * def addressMode(self, addressMode): * self._pvt_ptr[0].addressMode = [_x.value for _x in addressMode] */ /* Python wrapper */
/* Python-level wrapper for the addressMode property setter: forwards to the _pf_
 * implementation with the object downcast to its C struct. */
static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11addressMode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_addressMode); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11addressMode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_addressMode) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_11addressMode_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((PyObject *)__pyx_v_addressMode)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* addressMode.__set__ implementation: builds [ _x.value for _x in addressMode ]
 * (fast path for list/tuple, generic iterator otherwise), converts the result to a
 * 3-element enum cudaTextureAddressMode array, then memcpy's it into
 * _pvt_ptr[0].addressMode. Returns 0 on success, -1 with a Python exception set. */
static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_11addressMode_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, PyObject *__pyx_v_addressMode) { PyObject *__pyx_10genexpr180__pyx_v__x = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; enum cudaTextureAddressMode __pyx_t_6[3]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno 
= 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13278 * @addressMode.setter * def addressMode(self, addressMode): * self._pvt_ptr[0].addressMode = [_x.value for _x in addressMode] # <<<<<<<<<<<<<< * @property * def filterMode(self): */ { /* enter inner scope */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13278, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_1); if (likely(PyList_CheckExact(__pyx_v_addressMode)) || PyTuple_CheckExact(__pyx_v_addressMode)) { __pyx_t_2 = __pyx_v_addressMode; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_addressMode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13278, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13278, __pyx_L5_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13278, __pyx_L5_error)
#endif
if (__pyx_t_3 >= __pyx_temp) break; } __pyx_t_5 = __Pyx_PyList_GetItemRef(__pyx_t_2, __pyx_t_3); ++__pyx_t_3; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_2);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13278, __pyx_L5_error)
#endif
if (__pyx_t_3 >= __pyx_temp) break; }
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3));
#else
__pyx_t_5 = __Pyx_PySequence_ITEM(__pyx_t_2, __pyx_t_3);
#endif
++__pyx_t_3; } if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13278, __pyx_L5_error) } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 13278, 
__pyx_L5_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_5); __Pyx_XDECREF_SET(__pyx_10genexpr180__pyx_v__x, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_10genexpr180__pyx_v__x, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13278, __pyx_L5_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(0, 13278, __pyx_L5_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_10genexpr180__pyx_v__x); __pyx_10genexpr180__pyx_v__x = 0; goto __pyx_L9_exit_scope; __pyx_L5_error:; __Pyx_XDECREF(__pyx_10genexpr180__pyx_v__x); __pyx_10genexpr180__pyx_v__x = 0; goto __pyx_L1_error; __pyx_L9_exit_scope:; } /* exit inner scope */ if (unlikely((__Pyx_carray_from_py_enum__cudaTextureAddressMode(__pyx_t_1, __pyx_t_6, 3) < 0))) __PYX_ERR(0, 13278, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; memcpy(&((__pyx_v_self->_pvt_ptr[0]).addressMode[0]), __pyx_t_6, sizeof((__pyx_v_self->_pvt_ptr[0]).addressMode[0]) * (3)); /* "cuda/bindings/runtime.pyx":13276 * def addressMode(self): * return [_dict_cudaTextureAddressMode[_x] if _x in _dict_cudaTextureAddressMode else None for _x in list(self._pvt_ptr[0].addressMode)] * @addressMode.setter # <<<<<<<<<<<<<< * def addressMode(self, addressMode): * self._pvt_ptr[0].addressMode = [_x.value for _x in addressMode] */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.addressMode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_10genexpr180__pyx_v__x); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13279 * def addressMode(self, addressMode): * self._pvt_ptr[0].addressMode = [_x.value for _x in addressMode] * @property # <<<<<<<<<<<<<< 
* def filterMode(self): * if self._pvt_ptr[0].filterMode not in _dict_cudaTextureFilterMode: */ /* Python wrapper */
/* NOTE(review): Cython-generated C; do not hand-edit -- regenerate from the .pyx.
 * Python-level wrapper for the cudaTextureDesc.filterMode property getter: downcasts
 * the object to its C struct and forwards to the _pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_10filterMode_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_10filterMode_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_10filterMode___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* filterMode.__get__ implementation: maps the raw _pvt_ptr[0].filterMode enum value
 * through the module-global _dict_cudaTextureFilterMode; returns None when the raw
 * value has no mapping. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_10filterMode___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13281 * @property * def filterMode(self): * if self._pvt_ptr[0].filterMode not in _dict_cudaTextureFilterMode: # <<<<<<<<<<<<<< * return None * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].filterMode] */ __pyx_t_1 = __Pyx_PyLong_From_enum__cudaTextureFilterMode((__pyx_v_self->_pvt_ptr[0]).filterMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaTextureFilterMode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 13281, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "cuda/bindings/runtime.pyx":13282 * def filterMode(self): * if self._pvt_ptr[0].filterMode not in _dict_cudaTextureFilterMode: * return None # <<<<<<<<<<<<<< * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].filterMode] * @filterMode.setter */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13281 * @property * def filterMode(self): * if self._pvt_ptr[0].filterMode not in _dict_cudaTextureFilterMode: # <<<<<<<<<<<<<< * return None * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].filterMode] */ } /* "cuda/bindings/runtime.pyx":13283 * if self._pvt_ptr[0].filterMode not in _dict_cudaTextureFilterMode: * return None * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].filterMode] # <<<<<<<<<<<<<< * @filterMode.setter * def filterMode(self, filterMode not None : cudaTextureFilterMode): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaTextureFilterMode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyLong_From_enum__cudaTextureFilterMode((__pyx_v_self->_pvt_ptr[0]).filterMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13279 * def addressMode(self, addressMode): * self._pvt_ptr[0].addressMode = [_x.value for _x in addressMode] * @property # <<<<<<<<<<<<<< * def filterMode(self): * if self._pvt_ptr[0].filterMode not in _dict_cudaTextureFilterMode: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); 
__Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.filterMode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13284 * return None * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].filterMode] * @filterMode.setter # <<<<<<<<<<<<<< * def filterMode(self, filterMode not None : cudaTextureFilterMode): * self._pvt_ptr[0].filterMode = filterMode.value */ /* Python wrapper */
/* Python-level wrapper for the filterMode property setter: rejects None with a
 * TypeError ("Argument '...' must not be None") before delegating to the _pf_
 * implementation. */
static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_10filterMode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_filterMode); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_10filterMode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_filterMode) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(((PyObject *)__pyx_v_filterMode) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "filterMode"); __PYX_ERR(0, 13285, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_10filterMode_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((PyObject *)__pyx_v_filterMode)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* filterMode.__set__ implementation: reads filterMode.value, converts it to
 * enum cudaTextureFilterMode, and stores it into _pvt_ptr[0].filterMode.
 * Returns 0 on success, -1 with a Python exception set. */
static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_10filterMode_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, PyObject *__pyx_v_filterMode) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaTextureFilterMode __pyx_t_2; int __pyx_lineno = 0; 
const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13286 * @filterMode.setter * def filterMode(self, filterMode not None : cudaTextureFilterMode): * self._pvt_ptr[0].filterMode = filterMode.value # <<<<<<<<<<<<<< * @property * def readMode(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_filterMode, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaTextureFilterMode)__Pyx_PyLong_As_enum__cudaTextureFilterMode(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 13286, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (__pyx_v_self->_pvt_ptr[0]).filterMode = __pyx_t_2; /* "cuda/bindings/runtime.pyx":13284 * return None * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].filterMode] * @filterMode.setter # <<<<<<<<<<<<<< * def filterMode(self, filterMode not None : cudaTextureFilterMode): * self._pvt_ptr[0].filterMode = filterMode.value */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.filterMode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13287 * def filterMode(self, filterMode not None : cudaTextureFilterMode): * self._pvt_ptr[0].filterMode = filterMode.value * @property # <<<<<<<<<<<<<< * def readMode(self): * if self._pvt_ptr[0].readMode not in _dict_cudaTextureReadMode: */ /* Python wrapper */
/* Python-level wrapper for the readMode property getter: forwards to the _pf_
 * implementation (which continues past this chunk). */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_8readMode_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_8readMode_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_8readMode___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_8readMode___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13289 * @property * def readMode(self): * if self._pvt_ptr[0].readMode not in _dict_cudaTextureReadMode: # <<<<<<<<<<<<<< * return None * return _dict_cudaTextureReadMode[self._pvt_ptr[0].readMode] */ __pyx_t_1 = __Pyx_PyLong_From_enum__cudaTextureReadMode((__pyx_v_self->_pvt_ptr[0]).readMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaTextureReadMode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 13289, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "cuda/bindings/runtime.pyx":13290 * def readMode(self): * if self._pvt_ptr[0].readMode not in _dict_cudaTextureReadMode: * return None # <<<<<<<<<<<<<< * return _dict_cudaTextureReadMode[self._pvt_ptr[0].readMode] * @readMode.setter */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13289 * @property * def 
readMode(self): * if self._pvt_ptr[0].readMode not in _dict_cudaTextureReadMode: # <<<<<<<<<<<<<< * return None * return _dict_cudaTextureReadMode[self._pvt_ptr[0].readMode] */ } /* "cuda/bindings/runtime.pyx":13291 * if self._pvt_ptr[0].readMode not in _dict_cudaTextureReadMode: * return None * return _dict_cudaTextureReadMode[self._pvt_ptr[0].readMode] # <<<<<<<<<<<<<< * @readMode.setter * def readMode(self, readMode not None : cudaTextureReadMode): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaTextureReadMode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyLong_From_enum__cudaTextureReadMode((__pyx_v_self->_pvt_ptr[0]).readMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13287 * def filterMode(self, filterMode not None : cudaTextureFilterMode): * self._pvt_ptr[0].filterMode = filterMode.value * @property # <<<<<<<<<<<<<< * def readMode(self): * if self._pvt_ptr[0].readMode not in _dict_cudaTextureReadMode: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.readMode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13292 * return None * return _dict_cudaTextureReadMode[self._pvt_ptr[0].readMode] * @readMode.setter # <<<<<<<<<<<<<< * def readMode(self, readMode not None : cudaTextureReadMode): * self._pvt_ptr[0].readMode = readMode.value 
*/
/* NOTE(review): Cython-generated C (from cuda/bindings/runtime.pyx). Do not
 * hand-edit; regenerate from the .pyx source instead. Comments below were
 * added for review readability only -- code tokens are unchanged. */
/* Python wrapper */
/* __set__ wrapper for cudaTextureDesc.readMode: enforces the 'not None'
 * annotation from the .pyx signature, then delegates to the pf-level
 * implementation below. Returns 0 on success, -1 with a Python exception set. */
static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_8readMode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_readMode); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_8readMode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_readMode) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not declared in this wrapper's
   * scope -- presumably satisfied by macro expansion in this Cython build;
   * confirm against freshly regenerated output. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  /* 'readMode not None' check from the .pyx parameter annotation. */
  if (unlikely(((PyObject *)__pyx_v_readMode) == Py_None)) {
    PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "readMode"); __PYX_ERR(0, 13293, __pyx_L1_error)
  }
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_8readMode_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((PyObject *)__pyx_v_readMode));
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = -1;
  goto __pyx_L5_cleaned_up;
  __pyx_L0:;
  __pyx_L5_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of cudaTextureDesc.readMode.__set__: fetches the Python
 * attribute readMode.value, converts it to the C enum cudaTextureReadMode,
 * and stores it into the wrapped struct via self->_pvt_ptr. */
static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_8readMode_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, PyObject *__pyx_v_readMode) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  enum cudaTextureReadMode __pyx_t_2;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__set__", 0);
  /* "cuda/bindings/runtime.pyx":13294
   * @readMode.setter
   * def readMode(self, readMode not None : cudaTextureReadMode):
   *     self._pvt_ptr[0].readMode = readMode.value             # <<<<<<<<<<<<<<
   * @property
   * def sRGB(self):
   */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_readMode, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13294, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = ((enum
cudaTextureReadMode)__Pyx_PyLong_As_enum__cudaTextureReadMode(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 13294, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (__pyx_v_self->_pvt_ptr[0]).readMode = __pyx_t_2; /* "cuda/bindings/runtime.pyx":13292 * return None * return _dict_cudaTextureReadMode[self._pvt_ptr[0].readMode] * @readMode.setter # <<<<<<<<<<<<<< * def readMode(self, readMode not None : cudaTextureReadMode): * self._pvt_ptr[0].readMode = readMode.value */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.readMode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13295 * def readMode(self, readMode not None : cudaTextureReadMode): * self._pvt_ptr[0].readMode = readMode.value * @property # <<<<<<<<<<<<<< * def sRGB(self): * return self._pvt_ptr[0].sRGB */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_4sRGB_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_4sRGB_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_4sRGB___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_4sRGB___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = 
NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13297 * @property * def sRGB(self): * return self._pvt_ptr[0].sRGB # <<<<<<<<<<<<<< * @sRGB.setter * def sRGB(self, int sRGB): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_pvt_ptr[0]).sRGB); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13297, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13295 * def readMode(self, readMode not None : cudaTextureReadMode): * self._pvt_ptr[0].readMode = readMode.value * @property # <<<<<<<<<<<<<< * def sRGB(self): * return self._pvt_ptr[0].sRGB */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.sRGB.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13298 * def sRGB(self): * return self._pvt_ptr[0].sRGB * @sRGB.setter # <<<<<<<<<<<<<< * def sRGB(self, int sRGB): * self._pvt_ptr[0].sRGB = sRGB */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_4sRGB_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_sRGB); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_4sRGB_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_sRGB) { int __pyx_v_sRGB; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_sRGB); { __pyx_v_sRGB = __Pyx_PyLong_As_int(__pyx_arg_sRGB); if (unlikely((__pyx_v_sRGB == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13299, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; 
/* NOTE(review): Cython-generated C (from cuda/bindings/runtime.pyx). Do not
 * hand-edit; regenerate from the .pyx source instead. Comments below were
 * added for review readability only -- code tokens are unchanged. */
/* Tail of the sRGB.__set__ Python wrapper: traceback path taken when int
 * conversion of the argument failed, then the successful delegation path. */
__Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.sRGB.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_4sRGB_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((int)__pyx_v_sRGB));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* Implementation of cudaTextureDesc.sRGB.__set__: the wrapper already
 * converted the argument to a C int, so this is a plain field store into the
 * wrapped struct. Cannot fail; always returns 0. */
static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_4sRGB_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, int __pyx_v_sRGB) {
  int __pyx_r;
  /* "cuda/bindings/runtime.pyx":13300
   * @sRGB.setter
   * def sRGB(self, int sRGB):
   *     self._pvt_ptr[0].sRGB = sRGB             # <<<<<<<<<<<<<<
   * @property
   * def borderColor(self):
   */
  (__pyx_v_self->_pvt_ptr[0]).sRGB = __pyx_v_sRGB;
  /* "cuda/bindings/runtime.pyx":13298
   * def sRGB(self):
   *     return self._pvt_ptr[0].sRGB
   * @sRGB.setter             # <<<<<<<<<<<<<<
   * def sRGB(self, int sRGB):
   *     self._pvt_ptr[0].sRGB = sRGB
   */
  /* function exit code */
  __pyx_r = 0;
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13301
 * def sRGB(self, int sRGB):
 *     self._pvt_ptr[0].sRGB = sRGB
 * @property             # <<<<<<<<<<<<<<
 * def borderColor(self):
 *     return self._pvt_ptr[0].borderColor
 */
/* Python wrapper */
/* __get__ wrapper for cudaTextureDesc.borderColor: casts self to the
 * extension-type struct and delegates to the pf-level getter. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11borderColor_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11borderColor_1__get__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  /* NOTE(review): __pyx_args / __pyx_nargs are not declared in this wrapper's
   * scope -- presumably satisfied by macro expansion in this Cython build;
   * confirm against freshly regenerated output. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_11borderColor___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static
PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_11borderColor___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13303 * @property * def borderColor(self): * return self._pvt_ptr[0].borderColor # <<<<<<<<<<<<<< * @borderColor.setter * def borderColor(self, borderColor): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_carray_to_py_float((__pyx_v_self->_pvt_ptr[0]).borderColor, 4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13301 * def sRGB(self, int sRGB): * self._pvt_ptr[0].sRGB = sRGB * @property # <<<<<<<<<<<<<< * def borderColor(self): * return self._pvt_ptr[0].borderColor */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.borderColor.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13304 * def borderColor(self): * return self._pvt_ptr[0].borderColor * @borderColor.setter # <<<<<<<<<<<<<< * def borderColor(self, borderColor): * self._pvt_ptr[0].borderColor = borderColor */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11borderColor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_borderColor); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11borderColor_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_borderColor) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = 
/* NOTE(review): Cython-generated C (from cuda/bindings/runtime.pyx). Do not
 * hand-edit; regenerate from the .pyx source instead. Comments below were
 * added for review readability only -- code tokens are unchanged. */
__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
/* Delegation from the borderColor.__set__ wrapper to the pf-level setter. */
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_11borderColor_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((PyObject *)__pyx_v_borderColor));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* Implementation of cudaTextureDesc.borderColor.__set__: converts the incoming
 * Python value into a temporary float[4] with __Pyx_carray_from_py_float and
 * memcpy's it into the struct field. Returns 0 on success, -1 (exception set)
 * if the element conversion fails. */
static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_11borderColor_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, PyObject *__pyx_v_borderColor) {
  int __pyx_r;
  float __pyx_t_1[4];  /* temporary C array filled from the Python value */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* "cuda/bindings/runtime.pyx":13306
   * @borderColor.setter
   * def borderColor(self, borderColor):
   *     self._pvt_ptr[0].borderColor = borderColor             # <<<<<<<<<<<<<<
   * @property
   * def normalizedCoords(self):
   */
  /* Convert Python object -> float[4]; on failure jump to the traceback path. */
  if (unlikely((__Pyx_carray_from_py_float(__pyx_v_borderColor, __pyx_t_1, 4) < 0))) __PYX_ERR(0, 13306, __pyx_L1_error)
  memcpy(&((__pyx_v_self->_pvt_ptr[0]).borderColor[0]), __pyx_t_1, sizeof((__pyx_v_self->_pvt_ptr[0]).borderColor[0]) * (4));
  /* "cuda/bindings/runtime.pyx":13304
   * def borderColor(self):
   *     return self._pvt_ptr[0].borderColor
   * @borderColor.setter             # <<<<<<<<<<<<<<
   * def borderColor(self, borderColor):
   *     self._pvt_ptr[0].borderColor = borderColor
   */
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.borderColor.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13307
 * def borderColor(self, borderColor):
 *     self._pvt_ptr[0].borderColor = borderColor
 * @property             # <<<<<<<<<<<<<<
 * def normalizedCoords(self):
 *     return self._pvt_ptr[0].normalizedCoords
 */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_16normalizedCoords_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_16normalizedCoords_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_16normalizedCoords___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_16normalizedCoords___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13309 * @property * def normalizedCoords(self): * return self._pvt_ptr[0].normalizedCoords # <<<<<<<<<<<<<< * @normalizedCoords.setter * def normalizedCoords(self, int normalizedCoords): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_pvt_ptr[0]).normalizedCoords); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13309, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13307 * def borderColor(self, borderColor): * self._pvt_ptr[0].borderColor = borderColor * @property # <<<<<<<<<<<<<< * def normalizedCoords(self): * return self._pvt_ptr[0].normalizedCoords */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.normalizedCoords.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13310 * def normalizedCoords(self): * 
return self._pvt_ptr[0].normalizedCoords * @normalizedCoords.setter # <<<<<<<<<<<<<< * def normalizedCoords(self, int normalizedCoords): * self._pvt_ptr[0].normalizedCoords = normalizedCoords */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_16normalizedCoords_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_normalizedCoords); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_16normalizedCoords_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_normalizedCoords) { int __pyx_v_normalizedCoords; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_normalizedCoords); { __pyx_v_normalizedCoords = __Pyx_PyLong_As_int(__pyx_arg_normalizedCoords); if (unlikely((__pyx_v_normalizedCoords == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13311, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.normalizedCoords.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_16normalizedCoords_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((int)__pyx_v_normalizedCoords)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_16normalizedCoords_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, int __pyx_v_normalizedCoords) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13312 * @normalizedCoords.setter * def normalizedCoords(self, int normalizedCoords): * self._pvt_ptr[0].normalizedCoords = normalizedCoords # 
<<<<<<<<<<<<<< * @property * def maxAnisotropy(self): */ (__pyx_v_self->_pvt_ptr[0]).normalizedCoords = __pyx_v_normalizedCoords; /* "cuda/bindings/runtime.pyx":13310 * def normalizedCoords(self): * return self._pvt_ptr[0].normalizedCoords * @normalizedCoords.setter # <<<<<<<<<<<<<< * def normalizedCoords(self, int normalizedCoords): * self._pvt_ptr[0].normalizedCoords = normalizedCoords */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13313 * def normalizedCoords(self, int normalizedCoords): * self._pvt_ptr[0].normalizedCoords = normalizedCoords * @property # <<<<<<<<<<<<<< * def maxAnisotropy(self): * return self._pvt_ptr[0].maxAnisotropy */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_13maxAnisotropy_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_13maxAnisotropy_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_13maxAnisotropy___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_13maxAnisotropy___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13315 * @property * def maxAnisotropy(self): * return self._pvt_ptr[0].maxAnisotropy # <<<<<<<<<<<<<< * @maxAnisotropy.setter * def maxAnisotropy(self, unsigned int maxAnisotropy): 
*/ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_pvt_ptr[0]).maxAnisotropy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13313 * def normalizedCoords(self, int normalizedCoords): * self._pvt_ptr[0].normalizedCoords = normalizedCoords * @property # <<<<<<<<<<<<<< * def maxAnisotropy(self): * return self._pvt_ptr[0].maxAnisotropy */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.maxAnisotropy.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13316 * def maxAnisotropy(self): * return self._pvt_ptr[0].maxAnisotropy * @maxAnisotropy.setter # <<<<<<<<<<<<<< * def maxAnisotropy(self, unsigned int maxAnisotropy): * self._pvt_ptr[0].maxAnisotropy = maxAnisotropy */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_13maxAnisotropy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_maxAnisotropy); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_13maxAnisotropy_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_maxAnisotropy) { unsigned int __pyx_v_maxAnisotropy; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_maxAnisotropy); { __pyx_v_maxAnisotropy = __Pyx_PyLong_As_unsigned_int(__pyx_arg_maxAnisotropy); if (unlikely((__pyx_v_maxAnisotropy == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13317, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; 
__Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.maxAnisotropy.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_13maxAnisotropy_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((unsigned int)__pyx_v_maxAnisotropy)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_13maxAnisotropy_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, unsigned int __pyx_v_maxAnisotropy) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13318 * @maxAnisotropy.setter * def maxAnisotropy(self, unsigned int maxAnisotropy): * self._pvt_ptr[0].maxAnisotropy = maxAnisotropy # <<<<<<<<<<<<<< * @property * def mipmapFilterMode(self): */ (__pyx_v_self->_pvt_ptr[0]).maxAnisotropy = __pyx_v_maxAnisotropy; /* "cuda/bindings/runtime.pyx":13316 * def maxAnisotropy(self): * return self._pvt_ptr[0].maxAnisotropy * @maxAnisotropy.setter # <<<<<<<<<<<<<< * def maxAnisotropy(self, unsigned int maxAnisotropy): * self._pvt_ptr[0].maxAnisotropy = maxAnisotropy */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13319 * def maxAnisotropy(self, unsigned int maxAnisotropy): * self._pvt_ptr[0].maxAnisotropy = maxAnisotropy * @property # <<<<<<<<<<<<<< * def mipmapFilterMode(self): * if self._pvt_ptr[0].mipmapFilterMode not in _dict_cudaTextureFilterMode: */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_16mipmapFilterMode_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_16mipmapFilterMode_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); 
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_16mipmapFilterMode___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_16mipmapFilterMode___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13321 * @property * def mipmapFilterMode(self): * if self._pvt_ptr[0].mipmapFilterMode not in _dict_cudaTextureFilterMode: # <<<<<<<<<<<<<< * return None * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].mipmapFilterMode] */ __pyx_t_1 = __Pyx_PyLong_From_enum__cudaTextureFilterMode((__pyx_v_self->_pvt_ptr[0]).mipmapFilterMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaTextureFilterMode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 13321, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "cuda/bindings/runtime.pyx":13322 * def mipmapFilterMode(self): * if self._pvt_ptr[0].mipmapFilterMode not in _dict_cudaTextureFilterMode: * return None # <<<<<<<<<<<<<< * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].mipmapFilterMode] * @mipmapFilterMode.setter */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* 
"cuda/bindings/runtime.pyx":13321 * @property * def mipmapFilterMode(self): * if self._pvt_ptr[0].mipmapFilterMode not in _dict_cudaTextureFilterMode: # <<<<<<<<<<<<<< * return None * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].mipmapFilterMode] */ } /* NOTE(review): this translation unit is machine-generated by Cython from cuda/bindings/runtime.pyx — do not hand-edit; change the .pyx source and regenerate. */ /* "cuda/bindings/runtime.pyx":13323 * if self._pvt_ptr[0].mipmapFilterMode not in _dict_cudaTextureFilterMode: * return None * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].mipmapFilterMode] # <<<<<<<<<<<<<< * @mipmapFilterMode.setter * def mipmapFilterMode(self, mipmapFilterMode not None : cudaTextureFilterMode): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaTextureFilterMode); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13323, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyLong_From_enum__cudaTextureFilterMode((__pyx_v_self->_pvt_ptr[0]).mipmapFilterMode); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13323, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13323, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13319 * def maxAnisotropy(self, unsigned int maxAnisotropy): * self._pvt_ptr[0].maxAnisotropy = maxAnisotropy * @property # <<<<<<<<<<<<<< * def mipmapFilterMode(self): * if self._pvt_ptr[0].mipmapFilterMode not in _dict_cudaTextureFilterMode: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.mipmapFilterMode.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13324 * return None * return 
_dict_cudaTextureFilterMode[self._pvt_ptr[0].mipmapFilterMode] * @mipmapFilterMode.setter # <<<<<<<<<<<<<< * def mipmapFilterMode(self, mipmapFilterMode not None : cudaTextureFilterMode): * self._pvt_ptr[0].mipmapFilterMode = mipmapFilterMode.value */ /* NOTE(review): __set__ wrapper explicitly rejects None before delegating to the impl, which reads the enum object's .value attribute and stores the converted C enum into the struct field. */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_16mipmapFilterMode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_mipmapFilterMode); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_16mipmapFilterMode_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_mipmapFilterMode) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(((PyObject *)__pyx_v_mipmapFilterMode) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "mipmapFilterMode"); __PYX_ERR(0, 13325, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_16mipmapFilterMode_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((PyObject *)__pyx_v_mipmapFilterMode)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_16mipmapFilterMode_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, PyObject *__pyx_v_mipmapFilterMode) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaTextureFilterMode __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13326 * @mipmapFilterMode.setter * def mipmapFilterMode(self, mipmapFilterMode not 
None : cudaTextureFilterMode): * self._pvt_ptr[0].mipmapFilterMode = mipmapFilterMode.value # <<<<<<<<<<<<<< * @property * def mipmapLevelBias(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_mipmapFilterMode, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13326, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaTextureFilterMode)__Pyx_PyLong_As_enum__cudaTextureFilterMode(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 13326, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (__pyx_v_self->_pvt_ptr[0]).mipmapFilterMode = __pyx_t_2; /* "cuda/bindings/runtime.pyx":13324 * return None * return _dict_cudaTextureFilterMode[self._pvt_ptr[0].mipmapFilterMode] * @mipmapFilterMode.setter # <<<<<<<<<<<<<< * def mipmapFilterMode(self, mipmapFilterMode not None : cudaTextureFilterMode): * self._pvt_ptr[0].mipmapFilterMode = mipmapFilterMode.value */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.mipmapFilterMode.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13327 * def mipmapFilterMode(self, mipmapFilterMode not None : cudaTextureFilterMode): * self._pvt_ptr[0].mipmapFilterMode = mipmapFilterMode.value * @property # <<<<<<<<<<<<<< * def mipmapLevelBias(self): * return self._pvt_ptr[0].mipmapLevelBias */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_15mipmapLevelBias_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_15mipmapLevelBias_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r 
= __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_15mipmapLevelBias___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_15mipmapLevelBias___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13329 * @property * def mipmapLevelBias(self): * return self._pvt_ptr[0].mipmapLevelBias # <<<<<<<<<<<<<< * @mipmapLevelBias.setter * def mipmapLevelBias(self, float mipmapLevelBias): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble((__pyx_v_self->_pvt_ptr[0]).mipmapLevelBias); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13329, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13327 * def mipmapFilterMode(self, mipmapFilterMode not None : cudaTextureFilterMode): * self._pvt_ptr[0].mipmapFilterMode = mipmapFilterMode.value * @property # <<<<<<<<<<<<<< * def mipmapLevelBias(self): * return self._pvt_ptr[0].mipmapLevelBias */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.mipmapLevelBias.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13330 * def mipmapLevelBias(self): * return self._pvt_ptr[0].mipmapLevelBias * @mipmapLevelBias.setter # <<<<<<<<<<<<<< * def mipmapLevelBias(self, float mipmapLevelBias): * self._pvt_ptr[0].mipmapLevelBias = mipmapLevelBias */ /* NOTE(review): float-property setter — wrapper converts the Python argument with __Pyx_PyFloat_AsFloat (sentinel -1 + PyErr_Occurred check), impl stores the C float directly. */ /* Python wrapper */ static int 
__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_15mipmapLevelBias_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_mipmapLevelBias); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_15mipmapLevelBias_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_mipmapLevelBias) { float __pyx_v_mipmapLevelBias; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_mipmapLevelBias); { __pyx_v_mipmapLevelBias = __Pyx_PyFloat_AsFloat(__pyx_arg_mipmapLevelBias); if (unlikely((__pyx_v_mipmapLevelBias == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 13331, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.mipmapLevelBias.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_15mipmapLevelBias_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((float)__pyx_v_mipmapLevelBias)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_15mipmapLevelBias_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, float __pyx_v_mipmapLevelBias) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13332 * @mipmapLevelBias.setter * def mipmapLevelBias(self, float mipmapLevelBias): * self._pvt_ptr[0].mipmapLevelBias = mipmapLevelBias # <<<<<<<<<<<<<< * @property * def minMipmapLevelClamp(self): */ (__pyx_v_self->_pvt_ptr[0]).mipmapLevelBias = __pyx_v_mipmapLevelBias; /* "cuda/bindings/runtime.pyx":13330 * def mipmapLevelBias(self): * return 
self._pvt_ptr[0].mipmapLevelBias * @mipmapLevelBias.setter # <<<<<<<<<<<<<< * def mipmapLevelBias(self, float mipmapLevelBias): * self._pvt_ptr[0].mipmapLevelBias = mipmapLevelBias */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13333 * def mipmapLevelBias(self, float mipmapLevelBias): * self._pvt_ptr[0].mipmapLevelBias = mipmapLevelBias * @property # <<<<<<<<<<<<<< * def minMipmapLevelClamp(self): * return self._pvt_ptr[0].minMipmapLevelClamp */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_19minMipmapLevelClamp_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_19minMipmapLevelClamp_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_19minMipmapLevelClamp___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_19minMipmapLevelClamp___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13335 * @property * def minMipmapLevelClamp(self): * return self._pvt_ptr[0].minMipmapLevelClamp # <<<<<<<<<<<<<< * @minMipmapLevelClamp.setter * def minMipmapLevelClamp(self, float minMipmapLevelClamp): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble((__pyx_v_self->_pvt_ptr[0]).minMipmapLevelClamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13335, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13333 * def mipmapLevelBias(self, float mipmapLevelBias): * self._pvt_ptr[0].mipmapLevelBias = mipmapLevelBias * @property # <<<<<<<<<<<<<< * def minMipmapLevelClamp(self): * return self._pvt_ptr[0].minMipmapLevelClamp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.minMipmapLevelClamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13336 * def minMipmapLevelClamp(self): * return self._pvt_ptr[0].minMipmapLevelClamp * @minMipmapLevelClamp.setter # <<<<<<<<<<<<<< * def minMipmapLevelClamp(self, float minMipmapLevelClamp): * self._pvt_ptr[0].minMipmapLevelClamp = minMipmapLevelClamp */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_19minMipmapLevelClamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_minMipmapLevelClamp); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_19minMipmapLevelClamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_minMipmapLevelClamp) { float __pyx_v_minMipmapLevelClamp; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_minMipmapLevelClamp); { __pyx_v_minMipmapLevelClamp = __Pyx_PyFloat_AsFloat(__pyx_arg_minMipmapLevelClamp); if (unlikely((__pyx_v_minMipmapLevelClamp == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 13337, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.minMipmapLevelClamp.__set__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_19minMipmapLevelClamp_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((float)__pyx_v_minMipmapLevelClamp)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_19minMipmapLevelClamp_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, float __pyx_v_minMipmapLevelClamp) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13338 * @minMipmapLevelClamp.setter * def minMipmapLevelClamp(self, float minMipmapLevelClamp): * self._pvt_ptr[0].minMipmapLevelClamp = minMipmapLevelClamp # <<<<<<<<<<<<<< * @property * def maxMipmapLevelClamp(self): */ (__pyx_v_self->_pvt_ptr[0]).minMipmapLevelClamp = __pyx_v_minMipmapLevelClamp; /* "cuda/bindings/runtime.pyx":13336 * def minMipmapLevelClamp(self): * return self._pvt_ptr[0].minMipmapLevelClamp * @minMipmapLevelClamp.setter # <<<<<<<<<<<<<< * def minMipmapLevelClamp(self, float minMipmapLevelClamp): * self._pvt_ptr[0].minMipmapLevelClamp = minMipmapLevelClamp */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13339 * def minMipmapLevelClamp(self, float minMipmapLevelClamp): * self._pvt_ptr[0].minMipmapLevelClamp = minMipmapLevelClamp * @property # <<<<<<<<<<<<<< * def maxMipmapLevelClamp(self): * return self._pvt_ptr[0].maxMipmapLevelClamp */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_19maxMipmapLevelClamp_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_19maxMipmapLevelClamp_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); 
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_19maxMipmapLevelClamp___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_19maxMipmapLevelClamp___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13341 * @property * def maxMipmapLevelClamp(self): * return self._pvt_ptr[0].maxMipmapLevelClamp # <<<<<<<<<<<<<< * @maxMipmapLevelClamp.setter * def maxMipmapLevelClamp(self, float maxMipmapLevelClamp): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyFloat_FromDouble((__pyx_v_self->_pvt_ptr[0]).maxMipmapLevelClamp); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13341, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13339 * def minMipmapLevelClamp(self, float minMipmapLevelClamp): * self._pvt_ptr[0].minMipmapLevelClamp = minMipmapLevelClamp * @property # <<<<<<<<<<<<<< * def maxMipmapLevelClamp(self): * return self._pvt_ptr[0].maxMipmapLevelClamp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.maxMipmapLevelClamp.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13342 * def maxMipmapLevelClamp(self): * return self._pvt_ptr[0].maxMipmapLevelClamp * @maxMipmapLevelClamp.setter # <<<<<<<<<<<<<< * def maxMipmapLevelClamp(self, float maxMipmapLevelClamp): * 
self._pvt_ptr[0].maxMipmapLevelClamp = maxMipmapLevelClamp */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_19maxMipmapLevelClamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_maxMipmapLevelClamp); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_19maxMipmapLevelClamp_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_maxMipmapLevelClamp) { float __pyx_v_maxMipmapLevelClamp; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_maxMipmapLevelClamp); { __pyx_v_maxMipmapLevelClamp = __Pyx_PyFloat_AsFloat(__pyx_arg_maxMipmapLevelClamp); if (unlikely((__pyx_v_maxMipmapLevelClamp == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 13343, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.maxMipmapLevelClamp.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_19maxMipmapLevelClamp_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((float)__pyx_v_maxMipmapLevelClamp)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_19maxMipmapLevelClamp_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, float __pyx_v_maxMipmapLevelClamp) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13344 * @maxMipmapLevelClamp.setter * def maxMipmapLevelClamp(self, float maxMipmapLevelClamp): * self._pvt_ptr[0].maxMipmapLevelClamp = maxMipmapLevelClamp # <<<<<<<<<<<<<< * @property * def 
disableTrilinearOptimization(self): */ (__pyx_v_self->_pvt_ptr[0]).maxMipmapLevelClamp = __pyx_v_maxMipmapLevelClamp; /* "cuda/bindings/runtime.pyx":13342 * def maxMipmapLevelClamp(self): * return self._pvt_ptr[0].maxMipmapLevelClamp * @maxMipmapLevelClamp.setter # <<<<<<<<<<<<<< * def maxMipmapLevelClamp(self, float maxMipmapLevelClamp): * self._pvt_ptr[0].maxMipmapLevelClamp = maxMipmapLevelClamp */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13345 * def maxMipmapLevelClamp(self, float maxMipmapLevelClamp): * self._pvt_ptr[0].maxMipmapLevelClamp = maxMipmapLevelClamp * @property # <<<<<<<<<<<<<< * def disableTrilinearOptimization(self): * return self._pvt_ptr[0].disableTrilinearOptimization */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_28disableTrilinearOptimization_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_28disableTrilinearOptimization_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_28disableTrilinearOptimization___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_28disableTrilinearOptimization___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13347 * @property * def disableTrilinearOptimization(self): * 
return self._pvt_ptr[0].disableTrilinearOptimization # <<<<<<<<<<<<<< * @disableTrilinearOptimization.setter * def disableTrilinearOptimization(self, int disableTrilinearOptimization): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_pvt_ptr[0]).disableTrilinearOptimization); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13347, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13345 * def maxMipmapLevelClamp(self, float maxMipmapLevelClamp): * self._pvt_ptr[0].maxMipmapLevelClamp = maxMipmapLevelClamp * @property # <<<<<<<<<<<<<< * def disableTrilinearOptimization(self): * return self._pvt_ptr[0].disableTrilinearOptimization */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.disableTrilinearOptimization.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13348 * def disableTrilinearOptimization(self): * return self._pvt_ptr[0].disableTrilinearOptimization * @disableTrilinearOptimization.setter # <<<<<<<<<<<<<< * def disableTrilinearOptimization(self, int disableTrilinearOptimization): * self._pvt_ptr[0].disableTrilinearOptimization = disableTrilinearOptimization */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_28disableTrilinearOptimization_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_disableTrilinearOptimization); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_28disableTrilinearOptimization_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_disableTrilinearOptimization) { int __pyx_v_disableTrilinearOptimization; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_disableTrilinearOptimization); { __pyx_v_disableTrilinearOptimization = __Pyx_PyLong_As_int(__pyx_arg_disableTrilinearOptimization); if (unlikely((__pyx_v_disableTrilinearOptimization == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13349, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.disableTrilinearOptimization.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_28disableTrilinearOptimization_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((int)__pyx_v_disableTrilinearOptimization)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_28disableTrilinearOptimization_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, int __pyx_v_disableTrilinearOptimization) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13350 * @disableTrilinearOptimization.setter * def disableTrilinearOptimization(self, int disableTrilinearOptimization): * self._pvt_ptr[0].disableTrilinearOptimization = disableTrilinearOptimization # <<<<<<<<<<<<<< * @property * def seamlessCubemap(self): */ (__pyx_v_self->_pvt_ptr[0]).disableTrilinearOptimization = __pyx_v_disableTrilinearOptimization; /* "cuda/bindings/runtime.pyx":13348 * def disableTrilinearOptimization(self): * return self._pvt_ptr[0].disableTrilinearOptimization * @disableTrilinearOptimization.setter # <<<<<<<<<<<<<< * def disableTrilinearOptimization(self, int disableTrilinearOptimization): * self._pvt_ptr[0].disableTrilinearOptimization = disableTrilinearOptimization */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* 
"cuda/bindings/runtime.pyx":13351 * def disableTrilinearOptimization(self, int disableTrilinearOptimization): * self._pvt_ptr[0].disableTrilinearOptimization = disableTrilinearOptimization * @property # <<<<<<<<<<<<<< * def seamlessCubemap(self): * return self._pvt_ptr[0].seamlessCubemap */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_15seamlessCubemap_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_15seamlessCubemap_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_15seamlessCubemap___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_15seamlessCubemap___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13353 * @property * def seamlessCubemap(self): * return self._pvt_ptr[0].seamlessCubemap # <<<<<<<<<<<<<< * @seamlessCubemap.setter * def seamlessCubemap(self, int seamlessCubemap): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_int((__pyx_v_self->_pvt_ptr[0]).seamlessCubemap); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13351 * def disableTrilinearOptimization(self, int disableTrilinearOptimization): * 
self._pvt_ptr[0].disableTrilinearOptimization = disableTrilinearOptimization * @property # <<<<<<<<<<<<<< * def seamlessCubemap(self): * return self._pvt_ptr[0].seamlessCubemap */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.seamlessCubemap.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13354 * def seamlessCubemap(self): * return self._pvt_ptr[0].seamlessCubemap * @seamlessCubemap.setter # <<<<<<<<<<<<<< * def seamlessCubemap(self, int seamlessCubemap): * self._pvt_ptr[0].seamlessCubemap = seamlessCubemap */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_15seamlessCubemap_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_seamlessCubemap); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_15seamlessCubemap_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_seamlessCubemap) { int __pyx_v_seamlessCubemap; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_seamlessCubemap); { __pyx_v_seamlessCubemap = __Pyx_PyLong_As_int(__pyx_arg_seamlessCubemap); if (unlikely((__pyx_v_seamlessCubemap == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13355, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.seamlessCubemap.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_15seamlessCubemap_2__set__(((struct 
__pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), ((int)__pyx_v_seamlessCubemap)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_15seamlessCubemap_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, int __pyx_v_seamlessCubemap) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13356 * @seamlessCubemap.setter * def seamlessCubemap(self, int seamlessCubemap): * self._pvt_ptr[0].seamlessCubemap = seamlessCubemap # <<<<<<<<<<<<<< * * cdef class cudaEglPlaneDesc_st: */ (__pyx_v_self->_pvt_ptr[0]).seamlessCubemap = __pyx_v_seamlessCubemap; /* "cuda/bindings/runtime.pyx":13354 * def seamlessCubemap(self): * return self._pvt_ptr[0].seamlessCubemap * @seamlessCubemap.setter # <<<<<<<<<<<<<< * def seamlessCubemap(self, int seamlessCubemap): * self._pvt_ptr[0].seamlessCubemap = seamlessCubemap */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* NOTE(review): __reduce_cython__/__setstate_cython__ below intentionally disable pickling (they unconditionally raise TypeError) because the extension type has a non-trivial __cinit__. */ /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_15cudaTextureDesc_10__reduce_cython__, "cudaTextureDesc.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_15cudaTextureDesc_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_15cudaTextureDesc_10__reduce_cython__}; static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ 
__Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_15cudaTextureDesc_12__setstate_cython__, "cudaTextureDesc.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_15cudaTextureDesc_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_15cudaTextureDesc_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaTextureDesc_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state 
= 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) /* NOTE(review): parenthesization looks off — if unlikely(x) expands to __builtin_expect(!!(x), 0) as usual, `unlikely(__pyx_kwds_len) < 0` compares 0/1 with 0 and can never be true, so a negative kwargs length would go undetected; intended form is presumably `unlikely(__pyx_kwds_len < 0)` (cf. the correctly-parenthesized check in __reduce_cython__ above). Fix belongs in the Cython code generator, not in this generated file. */ if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; 
__pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaTextureDesc_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureDesc.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; 
/* NOTE(review): Cython-generated code — closing statements of the
 * __setstate_cython__ implementation whose body starts in the previous chunk. */
__Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13385 — def __cinit__(self, void_ptr _ptr = 0) */

/* Python wrapper for cudaEglPlaneDesc_st.__cinit__.
 * Accepts an optional `_ptr` argument (positional or keyword); defaults to 0. */
static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
#endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* NOTE(review): `unlikely(__pyx_kwds_len) < 0` — likely dead under the
     * __builtin_expect definition of unlikely(); upstream codegen quirk. */
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13385, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: up to one positional, rest via __Pyx_ParseKeywords. */
      switch (__pyx_nargs) {
        case  1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13385, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13385, __pyx_L3_error)
    } else {
      /* No keywords: 0 or 1 positional arguments accepted (default applies). */
      switch (__pyx_nargs) {
        case  1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13385, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert `_ptr` to an unsigned 64-bit integer, or use the default 0. */
    if (values[0]) {
      __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13385, __pyx_L3_error)
    } else {
      __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 13385, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Error path: release argument references, record traceback, fail. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), __pyx_v__ptr);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of cudaEglPlaneDesc_st.__cinit__: when _ptr == 0 the object
 * owns its storage (_pvt_ptr points at the embedded _pvt_val); otherwise
 * _pvt_ptr aliases the caller-supplied struct at address _ptr. */
static int __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) {
  int __pyx_r;
  int __pyx_t_1;
  /* "cuda/bindings/runtime.pyx":13386 — if _ptr == 0: */
  __pyx_t_1 = (__pyx_v__ptr == 0);
  if (__pyx_t_1) {
    /* "cuda/bindings/runtime.pyx":13387 — self._pvt_ptr = &self._pvt_val */
    __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val);
    goto __pyx_L3;
  }
  /* "cuda/bindings/runtime.pyx":13389 — self._pvt_ptr = _ptr */
  /*else*/ {
    __pyx_v_self->_pvt_ptr = ((struct __pyx_t_4cuda_8bindings_9cyruntime_cudaEglPlaneDesc_st *)__pyx_v__ptr);
  }
  __pyx_L3:;

  /* function exit code */
  __pyx_r = 0;
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13390 — def __init__(self, void_ptr _ptr = 0) */

/* Python wrapper for cudaEglPlaneDesc_st.__init__ (its parameter list
 * continues at the start of the next chunk). */
static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_3__init__(PyObject *__pyx_v_self, PyObject
*__pyx_args, PyObject *__pyx_kwds) {
  /* NOTE(review): Cython-generated code — continuation of the
   * cudaEglPlaneDesc_st.__init__ wrapper whose signature opens at the end of
   * the previous chunk.  Same argument handling as __cinit__: optional `_ptr`
   * (default 0), positional or keyword. */
  CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
#endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13390, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Keywords present: up to one positional, rest via __Pyx_ParseKeywords. */
      switch (__pyx_nargs) {
        case  1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13390, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 13390, __pyx_L3_error)
    } else {
      /* No keywords: 0 or 1 positional arguments accepted (default applies). */
      switch (__pyx_nargs) {
        case  1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13390, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert `_ptr` to an unsigned 64-bit integer, or use the default 0. */
    if (values[0]) {
      __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13390, __pyx_L3_error)
    } else {
      __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0);
    }
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 13390, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  /* Error path: release argument references, record traceback, fail. */
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), __pyx_v__ptr);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of cudaEglPlaneDesc_st.__init__: builds the nested
 * cudaChannelFormatDesc Python view onto self._pvt_ptr[0].channelDesc and
 * stores it in self._channelDesc.  The `_ptr` argument itself is unused here
 * (storage was wired up in __cinit__). */
static int __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_2__init__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);
  /* "cuda/bindings/runtime.pyx":13392
   * self._channelDesc = cudaChannelFormatDesc(_ptr=&self._pvt_ptr[0].channelDesc)
   * — call the wrapper type via the vectorcall builder, passing the address of
   * the embedded channelDesc as an integer keyword argument. */
  __pyx_t_2 = NULL;
  __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc);
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc);
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).channelDesc))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13392, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 1;
  {
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13392, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 13392, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13392, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  /* Swap the freshly-built wrapper into self._channelDesc, dropping the old
   * reference. */
  __Pyx_GIVEREF((PyObject *)__pyx_t_1);
  __Pyx_GOTREF((PyObject *)__pyx_v_self->_channelDesc);
  __Pyx_DECREF((PyObject *)__pyx_v_self->_channelDesc);
  __pyx_v_self->_channelDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13393 — def __dealloc__(self): pass
 * (declaration continues in the next chunk) */
/* Python wrapper */
static void
__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5__dealloc__(PyObject *__pyx_v_self); /*proto*/
/* NOTE(review): Cython-generated code.  Python wrapper for
 * cudaEglPlaneDesc_st.__dealloc__ — forwards to the (empty) implementation. */
static void __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  /* NOTE(review): __pyx_args/__pyx_nargs are not parameters of this function;
   * presumably __Pyx_KwValues_VARARGS is a macro that discards its arguments
   * unexpanded, otherwise this would not compile — verify against the macro
   * definition. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Implementation of __dealloc__: a no-op (`pass` in the .pyx source). */
static void __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_4__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) {

  /* function exit code */
}

/* "cuda/bindings/runtime.pyx":13395 — def getPtr(self): return self._pvt_ptr */

/* Python wrapper for cudaEglPlaneDesc_st.getPtr: rejects all arguments and
 * forwards to the implementation below. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_7getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6getPtr, "cudaEglPlaneDesc_st.getPtr(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6getPtr};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_7getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("getPtr (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* getPtr takes no arguments: reject positionals and keywords outright. */
  if (unlikely(__pyx_nargs > 0)) {
    __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL;
  }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of getPtr: returns self._pvt_ptr (the address of the wrapped
 * struct) converted to a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("getPtr", 0);
  /* "cuda/bindings/runtime.pyx":13396 — return self._pvt_ptr */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13396, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13397 — def __repr__(self) */

/* Python wrapper for cudaEglPlaneDesc_st.__repr__. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_9__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_9__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  /* NOTE(review): same out-of-scope __pyx_args/__pyx_nargs pattern as
   * __dealloc__ above — only compiles if the macro discards its arguments. */
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of __repr__ (body continues beyond this chunk): builds a
 * per-field string list, substituting a bare label when reading a field
 * raises ValueError. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) {
  PyObject *__pyx_v_str_list = NULL;
  PyObject *__pyx_10genexpr181__pyx_v_line = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  Py_ssize_t __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);
  /* "cuda/bindings/runtime.pyx":13398
 * return self._pvt_ptr
 * def __repr__(self):
 * if self._pvt_ptr is not NULL:             #
<<<<<<<<<<<<<< * str_list = [] * try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13399 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += ['width : ' + str(self.width)] */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13399, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13400 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['width : ' + str(self.width)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13401 * str_list = [] * try: * str_list += ['width : ' + str(self.width)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['width : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_width_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13401, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13401, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_width, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13401, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13401, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13401, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13401, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13400 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['width : ' + str(self.width)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":13402 * try: * str_list += ['width : ' + str(self.width)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['width : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13402, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13403 * str_list += ['width : ' + str(self.width)] * except ValueError: * str_list += ['width : '] # <<<<<<<<<<<<<< * try: * str_list += ['height : ' + str(self.height)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13403, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_width_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_width_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_width_ValueError) != (0)) __PYX_ERR(0, 13403, __pyx_L6_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13403, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); 
__pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":13400 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['width : ' + str(self.width)] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":13404 * except ValueError: * str_list += ['width : '] * try: # <<<<<<<<<<<<<< * str_list += ['height : ' + str(self.height)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13405 * str_list += ['width : '] * try: * str_list += ['height : ' + str(self.height)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['height : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_height_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13405, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13405, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_height, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13405, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13405, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); 
__Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13405, __pyx_L12_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13405, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13404 * except ValueError: * str_list += ['width : '] * try: # <<<<<<<<<<<<<< * str_list += ['height : ' + str(self.height)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L17_try_end; __pyx_L12_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13406 * try: * str_list += ['height : ' + str(self.height)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['height : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 13406, __pyx_L14_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":13407 * str_list += ['height : ' + str(self.height)] * except ValueError: * str_list += ['height : '] # <<<<<<<<<<<<<< * try: * str_list += ['depth : ' + str(self.depth)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13407, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_height_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_height_ValueError); if 
(__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_height_ValueError) != (0)) __PYX_ERR(0, 13407, __pyx_L14_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13407, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L13_exception_handled; } goto __pyx_L14_except_error; /* "cuda/bindings/runtime.pyx":13404 * except ValueError: * str_list += ['width : '] * try: # <<<<<<<<<<<<<< * str_list += ['height : ' + str(self.height)] * except ValueError: */ __pyx_L14_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L13_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L17_try_end:; } /* "cuda/bindings/runtime.pyx":13408 * except ValueError: * str_list += ['height : '] * try: # <<<<<<<<<<<<<< * str_list += ['depth : ' + str(self.depth)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13409 * str_list += ['height : '] * try: * str_list += ['depth : ' + str(self.depth)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['depth : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_depth_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13409, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) 
__PYX_ERR(0, 13409, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_depth, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13409, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13409, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13409, __pyx_L20_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13409, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13408 * except ValueError: * str_list += ['height : '] * try: # <<<<<<<<<<<<<< * str_list += ['depth : ' + str(self.depth)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L25_try_end; __pyx_L20_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13410 * try: * str_list += ['depth : ' + str(self.depth)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['depth : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13410, __pyx_L22_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13411 * str_list += ['depth 
: ' + str(self.depth)] * except ValueError: * str_list += ['depth : '] # <<<<<<<<<<<<<< * try: * str_list += ['pitch : ' + str(self.pitch)] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13411, __pyx_L22_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_depth_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_depth_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_depth_ValueError) != (0)) __PYX_ERR(0, 13411, __pyx_L22_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13411, __pyx_L22_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L21_exception_handled; } goto __pyx_L22_except_error; /* "cuda/bindings/runtime.pyx":13408 * except ValueError: * str_list += ['height : '] * try: # <<<<<<<<<<<<<< * str_list += ['depth : ' + str(self.depth)] * except ValueError: */ __pyx_L22_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L21_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L25_try_end:; } /* "cuda/bindings/runtime.pyx":13412 * except ValueError: * str_list += ['depth : '] * try: # <<<<<<<<<<<<<< * str_list += ['pitch : ' + str(self.pitch)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13413 * str_list += ['depth : '] * 
try: * str_list += ['pitch : ' + str(self.pitch)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['pitch : '] */ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_pitch_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13413, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_8); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13413, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_pitch, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13413, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13413, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13413, __pyx_L28_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13413, __pyx_L28_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13412 * except ValueError: * str_list += ['depth : '] * try: # <<<<<<<<<<<<<< * str_list += ['pitch : ' + str(self.pitch)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L33_try_end; __pyx_L28_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13414 * try: * str_list += ['pitch : ' + str(self.pitch)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['pitch : '] * try: */ __pyx_t_7 = 
__Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_6, &__pyx_t_2) < 0) __PYX_ERR(0, 13414, __pyx_L30_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); /* "cuda/bindings/runtime.pyx":13415 * str_list += ['pitch : ' + str(self.pitch)] * except ValueError: * str_list += ['pitch : '] # <<<<<<<<<<<<<< * try: * str_list += ['numChannels : ' + str(self.numChannels)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13415, __pyx_L30_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_pitch_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_pitch_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_pitch_ValueError) != (0)) __PYX_ERR(0, 13415, __pyx_L30_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13415, __pyx_L30_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L29_exception_handled; } goto __pyx_L30_except_error; /* "cuda/bindings/runtime.pyx":13412 * except ValueError: * str_list += ['depth : '] * try: # <<<<<<<<<<<<<< * str_list += ['pitch : ' + str(self.pitch)] * except ValueError: */ __pyx_L30_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L29_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L33_try_end:; 
} /* "cuda/bindings/runtime.pyx":13416 * except ValueError: * str_list += ['pitch : '] * try: # <<<<<<<<<<<<<< * str_list += ['numChannels : ' + str(self.numChannels)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13417 * str_list += ['pitch : '] * try: * str_list += ['numChannels : ' + str(self.numChannels)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['numChannels : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_numChannels_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13417, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13417, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_numChannels, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13417, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13417, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13417, __pyx_L36_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13417, __pyx_L36_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13416 * except ValueError: * str_list += ['pitch : '] * try: # <<<<<<<<<<<<<< * str_list += ['numChannels : ' + str(self.numChannels)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; 
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L41_try_end; __pyx_L36_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13418 * try: * str_list += ['numChannels : ' + str(self.numChannels)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['numChannels : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13418, __pyx_L38_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13419 * str_list += ['numChannels : ' + str(self.numChannels)] * except ValueError: * str_list += ['numChannels : '] # <<<<<<<<<<<<<< * try: * str_list += ['channelDesc :\n' + '\n'.join([' ' + line for line in str(self.channelDesc).splitlines()])] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13419, __pyx_L38_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_numChannels_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_numChannels_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_numChannels_ValueError) != (0)) __PYX_ERR(0, 13419, __pyx_L38_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13419, __pyx_L38_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L37_exception_handled; } goto 
__pyx_L38_except_error; /* "cuda/bindings/runtime.pyx":13416 * except ValueError: * str_list += ['pitch : '] * try: # <<<<<<<<<<<<<< * str_list += ['numChannels : ' + str(self.numChannels)] * except ValueError: */ __pyx_L38_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L37_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L41_try_end:; } /* "cuda/bindings/runtime.pyx":13420 * except ValueError: * str_list += ['numChannels : '] * try: # <<<<<<<<<<<<<< * str_list += ['channelDesc :\n' + '\n'.join([' ' + line for line in str(self.channelDesc).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13421 * str_list += ['numChannels : '] * try: * str_list += ['channelDesc :\n' + '\n'.join([' ' + line for line in str(self.channelDesc).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['channelDesc : '] */ { /* enter inner scope */ __pyx_t_8 = PyList_New(0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13421, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_channelDesc_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13421, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13421, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyUnicode_Splitlines(((PyObject*)__pyx_t_2), 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13421, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; 
__pyx_t_2 = __pyx_t_6; __Pyx_INCREF(__pyx_t_2); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13421, __pyx_L52_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_2, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13421, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_10genexpr181__pyx_v_line, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr181__pyx_v_line); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13421, __pyx_L52_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_8, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 13421, __pyx_L52_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_10genexpr181__pyx_v_line); __pyx_10genexpr181__pyx_v_line = 0; goto __pyx_L56_exit_scope; __pyx_L52_error:; __Pyx_XDECREF(__pyx_10genexpr181__pyx_v_line); __pyx_10genexpr181__pyx_v_line = 0; goto __pyx_L44_error; __pyx_L56_exit_scope:; } /* exit inner scope */ __pyx_t_2 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13421, __pyx_L44_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_channelDesc, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13421, __pyx_L44_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13421, __pyx_L44_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13421, __pyx_L44_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13421, 
__pyx_L44_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13420 * except ValueError: * str_list += ['numChannels : '] * try: # <<<<<<<<<<<<<< * str_list += ['channelDesc :\n' + '\n'.join([' ' + line for line in str(self.channelDesc).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L49_try_end; __pyx_L44_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13422 * try: * str_list += ['channelDesc :\n' + '\n'.join([' ' + line for line in str(self.channelDesc).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['channelDesc : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 13422, __pyx_L46_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":13423 * str_list += ['channelDesc :\n' + '\n'.join([' ' + line for line in str(self.channelDesc).splitlines()])] * except ValueError: * str_list += ['channelDesc : '] # <<<<<<<<<<<<<< * try: * str_list += ['reserved : ' + str(self.reserved)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13423, __pyx_L46_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_channelDesc_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_channelDesc_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, 
__pyx_mstate_global->__pyx_kp_u_channelDesc_ValueError) != (0)) __PYX_ERR(0, 13423, __pyx_L46_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13423, __pyx_L46_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L45_exception_handled; } goto __pyx_L46_except_error; /* "cuda/bindings/runtime.pyx":13420 * except ValueError: * str_list += ['numChannels : '] * try: # <<<<<<<<<<<<<< * str_list += ['channelDesc :\n' + '\n'.join([' ' + line for line in str(self.channelDesc).splitlines()])] * except ValueError: */ __pyx_L46_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L45_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L49_try_end:; } /* "cuda/bindings/runtime.pyx":13424 * except ValueError: * str_list += ['channelDesc : '] * try: # <<<<<<<<<<<<<< * str_list += ['reserved : ' + str(self.reserved)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13425 * str_list += ['channelDesc : '] * try: * str_list += ['reserved : ' + str(self.reserved)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['reserved : '] */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_reserved_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13425, __pyx_L59_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = 
__Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13425, __pyx_L59_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_reserved, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13425, __pyx_L59_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13425, __pyx_L59_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 13425, __pyx_L59_error); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13425, __pyx_L59_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_6)); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":13424 * except ValueError: * str_list += ['channelDesc : '] * try: # <<<<<<<<<<<<<< * str_list += ['reserved : ' + str(self.reserved)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L64_try_end; __pyx_L59_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13426 * try: * str_list += ['reserved : ' + str(self.reserved)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['reserved : '] * return '\n'.join(str_list) */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_2, &__pyx_t_8) < 0) __PYX_ERR(0, 13426, __pyx_L61_except_error) __Pyx_XGOTREF(__pyx_t_6); 
__Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13427 * str_list += ['reserved : ' + str(self.reserved)] * except ValueError: * str_list += ['reserved : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13427, __pyx_L61_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_reserved_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_reserved_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_reserved_ValueError) != (0)) __PYX_ERR(0, 13427, __pyx_L61_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13427, __pyx_L61_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L60_exception_handled; } goto __pyx_L61_except_error; /* "cuda/bindings/runtime.pyx":13424 * except ValueError: * str_list += ['channelDesc : '] * try: # <<<<<<<<<<<<<< * str_list += ['reserved : ' + str(self.reserved)] * except ValueError: */ __pyx_L61_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L60_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L64_try_end:; } /* "cuda/bindings/runtime.pyx":13428 * except ValueError: * str_list += ['reserved : '] * return '\n'.join(str_list) # <<<<<<<<<<<<<< * else: * return '' */ __Pyx_XDECREF(__pyx_r); __pyx_t_8 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13428, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_r = __pyx_t_8; __pyx_t_8 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13398 * return self._pvt_ptr * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ } /* "cuda/bindings/runtime.pyx":13430 * return '\n'.join(str_list) * else: * return '' # <<<<<<<<<<<<<< * @property * def width(self): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3); __pyx_r = __pyx_mstate_global->__pyx_kp_u__3; goto __pyx_L0; } /* "cuda/bindings/runtime.pyx":13397 * def getPtr(self): * return self._pvt_ptr * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_str_list); __Pyx_XDECREF(__pyx_10genexpr181__pyx_v_line); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13431 * else: * return '' * @property # <<<<<<<<<<<<<< * def width(self): * return self._pvt_ptr[0].width */ /* NOTE(review): machine-generated by Cython from cuda/bindings/runtime.pyx — do not hand-edit; fix the .pyx and regenerate. The pair below exposes cudaEglPlaneDesc_st.width as a Python property getter: the pw wrapper casts self and delegates to the pf impl, which boxes _pvt_ptr[0].width (unsigned int) into a Python int. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5width_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5width_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5width___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self)); /* function exit code */ 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5width___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13433 * @property * def width(self): * return self._pvt_ptr[0].width # <<<<<<<<<<<<<< * @width.setter * def width(self, unsigned int width): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_pvt_ptr[0]).width); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13433, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13431 * else: * return '' * @property # <<<<<<<<<<<<<< * def width(self): * return self._pvt_ptr[0].width */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.width.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13434 * def width(self): * return self._pvt_ptr[0].width * @width.setter # <<<<<<<<<<<<<< * def width(self, unsigned int width): * self._pvt_ptr[0].width = width */ /* width setter: the pw wrapper unboxes the Python argument with __Pyx_PyLong_As_unsigned_int (failure signalled by (unsigned int)-1 plus a pending exception, checked via PyErr_Occurred), then the pf impl stores the value into _pvt_ptr[0].width. */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5width_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_width); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5width_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_width) { unsigned int __pyx_v_width; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ 
(wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_width); { __pyx_v_width = __Pyx_PyLong_As_unsigned_int(__pyx_arg_width); if (unlikely((__pyx_v_width == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13435, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.width.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5width_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), ((unsigned int)__pyx_v_width)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5width_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, unsigned int __pyx_v_width) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13436 * @width.setter * def width(self, unsigned int width): * self._pvt_ptr[0].width = width # <<<<<<<<<<<<<< * @property * def height(self): */ (__pyx_v_self->_pvt_ptr[0]).width = __pyx_v_width; /* "cuda/bindings/runtime.pyx":13434 * def width(self): * return self._pvt_ptr[0].width * @width.setter # <<<<<<<<<<<<<< * def width(self, unsigned int width): * self._pvt_ptr[0].width = width */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13437 * def width(self, unsigned int width): * self._pvt_ptr[0].width = width * @property # <<<<<<<<<<<<<< * def height(self): * return self._pvt_ptr[0].height */ /* height getter wrapper follows — same generated pattern as width above. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6height_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6height_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; 
/* NOTE(review): Cython-generated accessors for cudaEglPlaneDesc_st.height — the getter boxes _pvt_ptr[0].height (unsigned int) into a Python int via __Pyx_PyLong_From_unsigned_int; the setter below unboxes and stores. Do not hand-edit; regenerate from cuda/bindings/runtime.pyx. */ PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6height___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6height___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13439 * @property * def height(self): * return self._pvt_ptr[0].height # <<<<<<<<<<<<<< * @height.setter * def height(self, unsigned int height): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_pvt_ptr[0]).height); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13439, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13437 * def width(self, unsigned int width): * self._pvt_ptr[0].width = width * @property # <<<<<<<<<<<<<< * def height(self): * return self._pvt_ptr[0].height */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.height.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13440 * def height(self): * return self._pvt_ptr[0].height * @height.setter # <<<<<<<<<<<<<< * def height(self, unsigned int height): * self._pvt_ptr[0].height = height */ /* Python wrapper */ static int 
__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6height_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_height); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6height_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_height) { unsigned int __pyx_v_height; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_height); { __pyx_v_height = __Pyx_PyLong_As_unsigned_int(__pyx_arg_height); if (unlikely((__pyx_v_height == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13441, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.height.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6height_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), ((unsigned int)__pyx_v_height)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* pf impl: a plain C store into the underlying struct; no error paths, always returns 0. */ static int __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_6height_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, unsigned int __pyx_v_height) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13442 * @height.setter * def height(self, unsigned int height): * self._pvt_ptr[0].height = height # <<<<<<<<<<<<<< * @property * def depth(self): */ (__pyx_v_self->_pvt_ptr[0]).height = __pyx_v_height; /* "cuda/bindings/runtime.pyx":13440 * def height(self): * return self._pvt_ptr[0].height * @height.setter # <<<<<<<<<<<<<< * def height(self, unsigned int height): * self._pvt_ptr[0].height = height */ /* function exit code */ __pyx_r = 0; 
return __pyx_r; } /* "cuda/bindings/runtime.pyx":13443 * def height(self, unsigned int height): * self._pvt_ptr[0].height = height * @property # <<<<<<<<<<<<<< * def depth(self): * return self._pvt_ptr[0].depth */ /* depth getter wrapper follows — same generated pattern as width/height above. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5depth_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5depth_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5depth___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5depth___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13445 * @property * def depth(self): * return self._pvt_ptr[0].depth # <<<<<<<<<<<<<< * @depth.setter * def depth(self, unsigned int depth): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_pvt_ptr[0]).depth); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13445, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13443 * def height(self, unsigned int height): * self._pvt_ptr[0].height = height * @property # <<<<<<<<<<<<<< * def depth(self): * return self._pvt_ptr[0].depth */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
/* NOTE(review): Cython-generated accessors for cudaEglPlaneDesc_st.depth and pitch. Setters unbox via __Pyx_PyLong_As_unsigned_int (failure = (unsigned int)-1 with an exception pending, checked via PyErr_Occurred) and store into _pvt_ptr[0]; getters box with __Pyx_PyLong_From_unsigned_int. Do not hand-edit — regenerate from cuda/bindings/runtime.pyx. */ __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.depth.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13446 * def depth(self): * return self._pvt_ptr[0].depth * @depth.setter # <<<<<<<<<<<<<< * def depth(self, unsigned int depth): * self._pvt_ptr[0].depth = depth */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5depth_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_depth); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5depth_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_depth) { unsigned int __pyx_v_depth; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_depth); { __pyx_v_depth = __Pyx_PyLong_As_unsigned_int(__pyx_arg_depth); if (unlikely((__pyx_v_depth == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13447, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.depth.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5depth_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), ((unsigned int)__pyx_v_depth)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5depth_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, unsigned int __pyx_v_depth) { int __pyx_r; /* 
"cuda/bindings/runtime.pyx":13448 * @depth.setter * def depth(self, unsigned int depth): * self._pvt_ptr[0].depth = depth # <<<<<<<<<<<<<< * @property * def pitch(self): */ (__pyx_v_self->_pvt_ptr[0]).depth = __pyx_v_depth; /* "cuda/bindings/runtime.pyx":13446 * def depth(self): * return self._pvt_ptr[0].depth * @depth.setter # <<<<<<<<<<<<<< * def depth(self, unsigned int depth): * self._pvt_ptr[0].depth = depth */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13449 * def depth(self, unsigned int depth): * self._pvt_ptr[0].depth = depth * @property # <<<<<<<<<<<<<< * def pitch(self): * return self._pvt_ptr[0].pitch */ /* pitch getter follows — same generated pattern as the other fields. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5pitch_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5pitch_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5pitch___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5pitch___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13451 * @property * def pitch(self): * return self._pvt_ptr[0].pitch # <<<<<<<<<<<<<< * @pitch.setter * def pitch(self, unsigned int pitch): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = 
__Pyx_PyLong_From_unsigned_int((__pyx_v_self->_pvt_ptr[0]).pitch); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13451, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13449 * def depth(self, unsigned int depth): * self._pvt_ptr[0].depth = depth * @property # <<<<<<<<<<<<<< * def pitch(self): * return self._pvt_ptr[0].pitch */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.pitch.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13452 * def pitch(self): * return self._pvt_ptr[0].pitch * @pitch.setter # <<<<<<<<<<<<<< * def pitch(self, unsigned int pitch): * self._pvt_ptr[0].pitch = pitch */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5pitch_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_pitch); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5pitch_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_pitch) { unsigned int __pyx_v_pitch; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_pitch); { __pyx_v_pitch = __Pyx_PyLong_As_unsigned_int(__pyx_arg_pitch); if (unlikely((__pyx_v_pitch == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13453, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.pitch.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5pitch_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), ((unsigned int)__pyx_v_pitch)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_5pitch_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, unsigned int __pyx_v_pitch) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13454 * @pitch.setter * def pitch(self, unsigned int pitch): * self._pvt_ptr[0].pitch = pitch # <<<<<<<<<<<<<< * @property * def numChannels(self): */ (__pyx_v_self->_pvt_ptr[0]).pitch = __pyx_v_pitch; /* "cuda/bindings/runtime.pyx":13452 * def pitch(self): * return self._pvt_ptr[0].pitch * @pitch.setter # <<<<<<<<<<<<<< * def pitch(self, unsigned int pitch): * self._pvt_ptr[0].pitch = pitch */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13455 * def pitch(self, unsigned int pitch): * self._pvt_ptr[0].pitch = pitch * @property # <<<<<<<<<<<<<< * def numChannels(self): * return self._pvt_ptr[0].numChannels */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11numChannels_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11numChannels_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11numChannels___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11numChannels___get__(struct 
__pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13457 * @property * def numChannels(self): * return self._pvt_ptr[0].numChannels # <<<<<<<<<<<<<< * @numChannels.setter * def numChannels(self, unsigned int numChannels): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_pvt_ptr[0]).numChannels); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13457, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13455 * def pitch(self, unsigned int pitch): * self._pvt_ptr[0].pitch = pitch * @property # <<<<<<<<<<<<<< * def numChannels(self): * return self._pvt_ptr[0].numChannels */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.numChannels.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13458 * def numChannels(self): * return self._pvt_ptr[0].numChannels * @numChannels.setter # <<<<<<<<<<<<<< * def numChannels(self, unsigned int numChannels): * self._pvt_ptr[0].numChannels = numChannels */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11numChannels_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_numChannels); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11numChannels_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_numChannels) { unsigned int __pyx_v_numChannels; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_numChannels); { __pyx_v_numChannels = __Pyx_PyLong_As_unsigned_int(__pyx_arg_numChannels); if (unlikely((__pyx_v_numChannels == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13459, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.numChannels.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11numChannels_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), ((unsigned int)__pyx_v_numChannels)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11numChannels_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, unsigned int __pyx_v_numChannels) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13460 * @numChannels.setter * def numChannels(self, unsigned int numChannels): * self._pvt_ptr[0].numChannels = numChannels # <<<<<<<<<<<<<< * @property * def channelDesc(self): */ (__pyx_v_self->_pvt_ptr[0]).numChannels = __pyx_v_numChannels; /* "cuda/bindings/runtime.pyx":13458 * def numChannels(self): * return self._pvt_ptr[0].numChannels * @numChannels.setter # <<<<<<<<<<<<<< * def numChannels(self, unsigned int numChannels): * self._pvt_ptr[0].numChannels = numChannels */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13461 * def numChannels(self, unsigned int numChannels): * self._pvt_ptr[0].numChannels = numChannels * @property # <<<<<<<<<<<<<< * def channelDesc(self): * return self._channelDesc */ /* Python wrapper */ static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11channelDesc_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11channelDesc_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11channelDesc___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11channelDesc___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13463 * @property * def channelDesc(self): * return self._channelDesc # <<<<<<<<<<<<<< * @channelDesc.setter * def channelDesc(self, channelDesc not None : cudaChannelFormatDesc): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_channelDesc); __pyx_r = ((PyObject *)__pyx_v_self->_channelDesc); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13461 * def numChannels(self, unsigned int numChannels): * self._pvt_ptr[0].numChannels = numChannels * @property # <<<<<<<<<<<<<< * def channelDesc(self): * return self._channelDesc */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13464 * def channelDesc(self): * return self._channelDesc * @channelDesc.setter # <<<<<<<<<<<<<< * def channelDesc(self, channelDesc not None : cudaChannelFormatDesc): * string.memcpy(&self._pvt_ptr[0].channelDesc, channelDesc.getPtr(), sizeof(self._pvt_ptr[0].channelDesc)) */ /* Python wrapper */ static int 
__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11channelDesc_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_channelDesc); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11channelDesc_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_channelDesc) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_channelDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc, 0, "channelDesc", 0))) __PYX_ERR(0, 13465, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11channelDesc_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *)__pyx_v_channelDesc)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11channelDesc_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_channelDesc) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13466 * @channelDesc.setter * def channelDesc(self, channelDesc not None : cudaChannelFormatDesc): * string.memcpy(&self._pvt_ptr[0].channelDesc, channelDesc.getPtr(), 
sizeof(self._pvt_ptr[0].channelDesc)) # <<<<<<<<<<<<<< * @property * def reserved(self): */ __pyx_t_2 = ((PyObject *)__pyx_v_channelDesc); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13466, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13466, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).channelDesc), ((struct cudaChannelFormatDesc *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).channelDesc)))); /* "cuda/bindings/runtime.pyx":13464 * def channelDesc(self): * return self._channelDesc * @channelDesc.setter # <<<<<<<<<<<<<< * def channelDesc(self, channelDesc not None : cudaChannelFormatDesc): * string.memcpy(&self._pvt_ptr[0].channelDesc, channelDesc.getPtr(), sizeof(self._pvt_ptr[0].channelDesc)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.channelDesc.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13467 * def channelDesc(self, channelDesc not None : cudaChannelFormatDesc): * string.memcpy(&self._pvt_ptr[0].channelDesc, channelDesc.getPtr(), sizeof(self._pvt_ptr[0].channelDesc)) * @property # <<<<<<<<<<<<<< * def reserved(self): * return self._pvt_ptr[0].reserved */ /* Python wrapper */ static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8reserved_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8reserved_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8reserved___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8reserved___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13469 * @property * def reserved(self): * return self._pvt_ptr[0].reserved # <<<<<<<<<<<<<< * @reserved.setter * def reserved(self, reserved): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_carray_to_py_unsigned_int((__pyx_v_self->_pvt_ptr[0]).reserved, 4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13469, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13467 * def channelDesc(self, channelDesc not None : cudaChannelFormatDesc): * string.memcpy(&self._pvt_ptr[0].channelDesc, channelDesc.getPtr(), sizeof(self._pvt_ptr[0].channelDesc)) * @property # <<<<<<<<<<<<<< * def reserved(self): * return self._pvt_ptr[0].reserved */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.reserved.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; 
__pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13470 * def reserved(self): * return self._pvt_ptr[0].reserved * @reserved.setter # <<<<<<<<<<<<<< * def reserved(self, reserved): * self._pvt_ptr[0].reserved = reserved */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8reserved_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_reserved); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8reserved_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_reserved) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8reserved_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), ((PyObject *)__pyx_v_reserved)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_8reserved_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, PyObject *__pyx_v_reserved) { int __pyx_r; unsigned int __pyx_t_1[4]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "cuda/bindings/runtime.pyx":13472 * @reserved.setter * def reserved(self, reserved): * self._pvt_ptr[0].reserved = reserved # <<<<<<<<<<<<<< * * cdef class anon_union11: */ if (unlikely((__Pyx_carray_from_py_unsigned_int(__pyx_v_reserved, __pyx_t_1, 4) < 0))) __PYX_ERR(0, 13472, __pyx_L1_error) memcpy(&((__pyx_v_self->_pvt_ptr[0]).reserved[0]), __pyx_t_1, sizeof((__pyx_v_self->_pvt_ptr[0]).reserved[0]) * (4)); /* "cuda/bindings/runtime.pyx":13470 * def reserved(self): * return self._pvt_ptr[0].reserved * @reserved.setter # <<<<<<<<<<<<<< * def reserved(self, reserved): * self._pvt_ptr[0].reserved = 
reserved */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.reserved.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_10__reduce_cython__, "cudaEglPlaneDesc_st.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to 
non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_12__setstate_cython__, "cudaEglPlaneDesc_st.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = 
{&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaEglPlaneDesc_st_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglPlaneDesc_st *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglPlaneDesc_st.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13488 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * self._pvt_ptr = _ptr * */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __pyx_t_4cuda_8bindings_7runtime_void_ptr 
__pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13488, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13488, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13488, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, i); __PYX_ERR(0, 13488, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13488, __pyx_L3_error) } __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13488, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13488, __pyx_L3_error) __pyx_L6_skip:; 
goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_12anon_union11___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13489 * """ * def __cinit__(self, void_ptr _ptr): * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * * def __init__(self, void_ptr _ptr): */ __pyx_v_self->_pvt_ptr = ((struct __pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrame_st *)__pyx_v__ptr); /* "cuda/bindings/runtime.pyx":13488 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * self._pvt_ptr = _ptr * */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13491 * self._pvt_ptr = _ptr * * def __init__(self, void_ptr _ptr): # <<<<<<<<<<<<<< * pass * def __dealloc__(self): */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; 
CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13491, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13491, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 13491, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, i); __PYX_ERR(0, 13491, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13491, __pyx_L3_error) } __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13491, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 13491, __pyx_L3_error) __pyx_L6_skip:; goto 
/* (continuation of the anon_union11.__init__ wrapper begun on the previous lines) */
__pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
  /* Error path: drop any argument references, record a traceback, signal failure. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
__pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self), __pyx_v__ptr);
  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of anon_union11.__init__(self, void_ptr _ptr): the Python-level body is
 * `pass`, so this is a no-op — the pointer was already stored by __cinit__. */
static int __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_2__init__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) {
  int __pyx_r;

  /* function exit code */
  __pyx_r = 0;
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13493
 * def __init__(self, void_ptr _ptr):
 * pass
 * def __dealloc__(self):             # <<<<<<<<<<<<<<
 * pass
 * def getPtr(self):
 */

/* Python wrapper */
static void __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_5__dealloc__(PyObject *__pyx_v_self); /*proto*/
/* tp_dealloc-time hook for anon_union11; the Python-level __dealloc__ body is `pass`.
 * NOTE(review): __Pyx_KwValues_VARARGS is a generated macro whose expansion ignores its
 * arguments, so the __pyx_args/__pyx_nargs names below are never evaluated here — confirm
 * against the utility-code section if the macro definition ever changes. */
static void __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_5__dealloc__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* No-op __dealloc__ implementation: nothing is freed here — presumably _pvt_ptr is owned by
 * the enclosing cudaEglFrame wrapper (verify in runtime.pyx). */
static void __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_4__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self) { /* function exit
code */ } /* "cuda/bindings/runtime.pyx":13495 * def __dealloc__(self): * pass * def getPtr(self): # <<<<<<<<<<<<<< * return &self._pvt_ptr[0].frame * def __repr__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_12anon_union11_6getPtr, "anon_union11.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_12anon_union11_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_12anon_union11_6getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_7getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
/* (continuation of the getPtr FASTCALL wrapper: finish the kwds-length ternary) */
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  /* getPtr takes no arguments: reject any keywords. */
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of anon_union11.getPtr(self): returns the address of the `frame` member of
 * the pointed-to cudaEglFrame_st as a Python integer (void_ptr). The returned value is a raw
 * address — only meaningful while self._pvt_ptr remains valid. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("getPtr", 0);

  /* "cuda/bindings/runtime.pyx":13496
 * pass
 * def getPtr(self):
 * return &self._pvt_ptr[0].frame             # <<<<<<<<<<<<<<
 * def __repr__(self):
 * if self._pvt_ptr is not NULL:
 */
  __Pyx_XDECREF(__pyx_r);
  /* Box &self._pvt_ptr[0].frame as an unsigned PyLong. */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_self->_pvt_ptr[0]).frame)));
  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13496, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/runtime.pyx":13495
 * def __dealloc__(self):
 * pass
 * def getPtr(self):             # <<<<<<<<<<<<<<
 * return &self._pvt_ptr[0].frame
 * def __repr__(self):
 */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13497
 * def getPtr(self):
 * return &self._pvt_ptr[0].frame
 * def __repr__(self):             # <<<<<<<<<<<<<<
 * if self._pvt_ptr is not NULL:
 * str_list = []
 */

/* Python wrapper (declaration; the __repr__ wrapper body continues on the following lines) */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_9__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_9__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union11_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self) { PyObject *__pyx_v_str_list = NULL; PyObject *__pyx_10genexpr182__pyx_v_line = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13498 * return &self._pvt_ptr[0].frame * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13499 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += ['pArray : ' + str(self.pArray)] */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13499, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13500 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['pArray : ' + str(self.pArray)] * except ValueError: */ { __Pyx_PyThreadState_declare 
__Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13501 * str_list = [] * try: * str_list += ['pArray : ' + str(self.pArray)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['pArray : '] */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_pArray_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13501, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_Unicode(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13501, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_pArray, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13501, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyList_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13501, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_6, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13501, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13501, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13500 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['pArray : ' + str(self.pArray)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":13502 * try: * str_list += ['pArray : ' + str(self.pArray)] * except ValueError: # 
<<<<<<<<<<<<<< * str_list += ['pArray : '] * try: */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_6, &__pyx_t_8) < 0) __PYX_ERR(0, 13502, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_8); /* "cuda/bindings/runtime.pyx":13503 * str_list += ['pArray : ' + str(self.pArray)] * except ValueError: * str_list += ['pArray : '] # <<<<<<<<<<<<<< * try: * str_list += ['pPitch :\n' + '\n'.join([' ' + line for line in str(self.pPitch).splitlines()])] */ __pyx_t_9 = PyList_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13503, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_pArray_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_pArray_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_9, 0, __pyx_mstate_global->__pyx_kp_u_pArray_ValueError) != (0)) __PYX_ERR(0, 13503, __pyx_L6_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13503, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":13500 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['pArray : ' + str(self.pArray)] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); 
__Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":13504 * except ValueError: * str_list += ['pArray : '] * try: # <<<<<<<<<<<<<< * str_list += ['pPitch :\n' + '\n'.join([' ' + line for line in str(self.pPitch).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13505 * str_list += ['pArray : '] * try: * str_list += ['pPitch :\n' + '\n'.join([' ' + line for line in str(self.pPitch).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['pPitch : '] */ { /* enter inner scope */ __pyx_t_8 = PyList_New(0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13505, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_pPitch_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13505, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13505, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyUnicode_Splitlines(((PyObject*)__pyx_t_2), 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13505, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_t_6; __Pyx_INCREF(__pyx_t_2); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13505, __pyx_L20_error) #endif if (__pyx_t_11 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_2, __pyx_t_11); ++__pyx_t_11; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13505, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); 
__Pyx_XDECREF_SET(__pyx_10genexpr182__pyx_v_line, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr182__pyx_v_line); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13505, __pyx_L20_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_8, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 13505, __pyx_L20_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_10genexpr182__pyx_v_line); __pyx_10genexpr182__pyx_v_line = 0; goto __pyx_L24_exit_scope; __pyx_L20_error:; __Pyx_XDECREF(__pyx_10genexpr182__pyx_v_line); __pyx_10genexpr182__pyx_v_line = 0; goto __pyx_L12_error; __pyx_L24_exit_scope:; } /* exit inner scope */ __pyx_t_2 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13505, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_8 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_pPitch, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13505, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13505, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_8); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_8) != (0)) __PYX_ERR(0, 13505, __pyx_L12_error); __pyx_t_8 = 0; __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13505, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_8)); __pyx_t_8 = 0; /* "cuda/bindings/runtime.pyx":13504 * except ValueError: * str_list += ['pArray : '] * try: # <<<<<<<<<<<<<< * str_list += ['pPitch :\n' + '\n'.join([' ' + line for line in str(self.pPitch).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; 
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L17_try_end; __pyx_L12_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "cuda/bindings/runtime.pyx":13506 * try: * str_list += ['pPitch :\n' + '\n'.join([' ' + line for line in str(self.pPitch).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['pPitch : '] * return '\n'.join(str_list) */ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_7) { __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 13506, __pyx_L14_except_error) __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":13507 * str_list += ['pPitch :\n' + '\n'.join([' ' + line for line in str(self.pPitch).splitlines()])] * except ValueError: * str_list += ['pPitch : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13507, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_pPitch_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_pPitch_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_pPitch_ValueError) != (0)) __PYX_ERR(0, 13507, __pyx_L14_except_error); __pyx_t_9 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 13507, __pyx_L14_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_9)); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L13_exception_handled; } goto 
__pyx_L14_except_error; /* "cuda/bindings/runtime.pyx":13504 * except ValueError: * str_list += ['pArray : '] * try: # <<<<<<<<<<<<<< * str_list += ['pPitch :\n' + '\n'.join([' ' + line for line in str(self.pPitch).splitlines()])] * except ValueError: */ __pyx_L14_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L13_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L17_try_end:; } /* "cuda/bindings/runtime.pyx":13508 * except ValueError: * str_list += ['pPitch : '] * return '\n'.join(str_list) # <<<<<<<<<<<<<< * else: * return '' */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13498 * return &self._pvt_ptr[0].frame * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ } /* "cuda/bindings/runtime.pyx":13510 * return '\n'.join(str_list) * else: * return '' # <<<<<<<<<<<<<< * @property * def pArray(self): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3); __pyx_r = __pyx_mstate_global->__pyx_kp_u__3; goto __pyx_L0; } /* "cuda/bindings/runtime.pyx":13497 * def getPtr(self): * return &self._pvt_ptr[0].frame * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; 
__Pyx_XDECREF(__pyx_v_str_list); __Pyx_XDECREF(__pyx_10genexpr182__pyx_v_line); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13511 * else: * return '' * @property # <<<<<<<<<<<<<< * def pArray(self): * return [cudaArray_t(init_value=_pArray) for _pArray in self._pvt_ptr[0].frame.pArray] */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_6pArray_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_6pArray_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6pArray___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6pArray___get__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self) { cudaArray_t __pyx_10genexpr183__pyx_v__pArray; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; cudaArray_t *__pyx_t_2; cudaArray_t *__pyx_t_3; cudaArray_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; size_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13513 * @property * def pArray(self): * return [cudaArray_t(init_value=_pArray) for _pArray in self._pvt_ptr[0].frame.pArray] # <<<<<<<<<<<<<< * @pArray.setter * def pArray(self, pArray : list[cudaArray_t]): */ __Pyx_XDECREF(__pyx_r); { /* enter inner scope */ __pyx_t_1 = PyList_New(0); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 13513, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((__pyx_v_self->_pvt_ptr[0]).frame.pArray + 3); for (__pyx_t_4 = (__pyx_v_self->_pvt_ptr[0]).frame.pArray; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_10genexpr183__pyx_v__pArray = (__pyx_t_2[0]); __pyx_t_6 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_7 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_8 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_10genexpr183__pyx_v__pArray)); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 13513, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_6, NULL}; __pyx_t_10 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13513, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_init_value, __pyx_t_8, __pyx_t_10, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 13513, __pyx_L1_error) __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_7, __pyx_callargs+__pyx_t_9, (1-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_10); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13513, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(0, 13513, __pyx_L1_error) __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; } } /* exit inner scope */ __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13511 * else: * return '' * @property # <<<<<<<<<<<<<< * def pArray(self): * return [cudaArray_t(init_value=_pArray) for _pArray in 
self._pvt_ptr[0].frame.pArray] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.pArray.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13514 * def pArray(self): * return [cudaArray_t(init_value=_pArray) for _pArray in self._pvt_ptr[0].frame.pArray] * @pArray.setter # <<<<<<<<<<<<<< * def pArray(self, pArray : list[cudaArray_t]): * if len(pArray) != 3: */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_6pArray_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_pArray); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_6pArray_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_pArray) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pArray), (&PyList_Type), 0, "pArray", 2))) __PYX_ERR(0, 13515, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6pArray_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self), ((PyObject*)__pyx_v_pArray)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6pArray_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self, PyObject *__pyx_v_pArray) { Py_ssize_t __pyx_v__idx; PyObject *__pyx_v__pArray = 
NULL; PyObject *__pyx_10genexpr184__pyx_v__pArray = NULL; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; size_t __pyx_t_7; Py_ssize_t __pyx_t_8; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); __Pyx_INCREF(__pyx_v_pArray); /* "cuda/bindings/runtime.pyx":13516 * @pArray.setter * def pArray(self, pArray : list[cudaArray_t]): * if len(pArray) != 3: # <<<<<<<<<<<<<< * raise IndexError('not enough values found during array assignment, expected 3, got', len(pArray)) * pArray = [int(_pArray) for _pArray in pArray] */ __pyx_t_1 = __Pyx_PyList_GET_SIZE(__pyx_v_pArray); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13516, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 3); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":13517 * def pArray(self, pArray : list[cudaArray_t]): * if len(pArray) != 3: * raise IndexError('not enough values found during array assignment, expected 3, got', len(pArray)) # <<<<<<<<<<<<<< * pArray = [int(_pArray) for _pArray in pArray] * for _idx, _pArray in enumerate(pArray): */ __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_IndexError); __pyx_t_5 = __pyx_builtin_IndexError; __pyx_t_1 = __Pyx_PyList_GET_SIZE(__pyx_v_pArray); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13517, __pyx_L1_error) __pyx_t_6 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = 1; { PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_mstate_global->__pyx_kp_u_not_enough_values_found_during_a, __pyx_t_6}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); 
__pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 13517, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":13516 * @pArray.setter * def pArray(self, pArray : list[cudaArray_t]): * if len(pArray) != 3: # <<<<<<<<<<<<<< * raise IndexError('not enough values found during array assignment, expected 3, got', len(pArray)) * pArray = [int(_pArray) for _pArray in pArray] */ } /* "cuda/bindings/runtime.pyx":13518 * if len(pArray) != 3: * raise IndexError('not enough values found during array assignment, expected 3, got', len(pArray)) * pArray = [int(_pArray) for _pArray in pArray] # <<<<<<<<<<<<<< * for _idx, _pArray in enumerate(pArray): * self._pvt_ptr[0].frame.pArray[_idx] = _pArray */ { /* enter inner scope */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13518, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __pyx_v_pArray; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_5); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13518, __pyx_L6_error) #endif if (__pyx_t_1 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_5, __pyx_t_1); ++__pyx_t_1; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13518, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_10genexpr184__pyx_v__pArray, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyNumber_Int(__pyx_10genexpr184__pyx_v__pArray); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13518, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 13518, __pyx_L6_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_10genexpr184__pyx_v__pArray); __pyx_10genexpr184__pyx_v__pArray = 0; goto __pyx_L10_exit_scope; 
__pyx_L6_error:; __Pyx_XDECREF(__pyx_10genexpr184__pyx_v__pArray); __pyx_10genexpr184__pyx_v__pArray = 0; goto __pyx_L1_error; __pyx_L10_exit_scope:; } /* exit inner scope */ __Pyx_DECREF_SET(__pyx_v_pArray, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":13519 * raise IndexError('not enough values found during array assignment, expected 3, got', len(pArray)) * pArray = [int(_pArray) for _pArray in pArray] * for _idx, _pArray in enumerate(pArray): # <<<<<<<<<<<<<< * self._pvt_ptr[0].frame.pArray[_idx] = _pArray * */ __pyx_t_1 = 0; __pyx_t_3 = __pyx_v_pArray; __Pyx_INCREF(__pyx_t_3); __pyx_t_8 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_3); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13519, __pyx_L1_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } __pyx_t_5 = __Pyx_PyList_GetItemRef(__pyx_t_3, __pyx_t_8); ++__pyx_t_8; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13519, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_XDECREF_SET(__pyx_v__pArray, __pyx_t_5); __pyx_t_5 = 0; __pyx_v__idx = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "cuda/bindings/runtime.pyx":13520 * pArray = [int(_pArray) for _pArray in pArray] * for _idx, _pArray in enumerate(pArray): * self._pvt_ptr[0].frame.pArray[_idx] = _pArray # <<<<<<<<<<<<<< * * @property */ __pyx_t_9 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v__pArray); if (unlikely((__pyx_t_9 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13520, __pyx_L1_error) ((__pyx_v_self->_pvt_ptr[0]).frame.pArray[__pyx_v__idx]) = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_9)); /* "cuda/bindings/runtime.pyx":13519 * raise IndexError('not enough values found during array assignment, expected 3, got', len(pArray)) * pArray = [int(_pArray) for _pArray in pArray] * for _idx, _pArray in enumerate(pArray): # <<<<<<<<<<<<<< * self._pvt_ptr[0].frame.pArray[_idx] = _pArray * */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* 
"cuda/bindings/runtime.pyx":13514 * def pArray(self): * return [cudaArray_t(init_value=_pArray) for _pArray in self._pvt_ptr[0].frame.pArray] * @pArray.setter # <<<<<<<<<<<<<< * def pArray(self, pArray : list[cudaArray_t]): * if len(pArray) != 3: */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.pArray.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v__pArray); __Pyx_XDECREF(__pyx_10genexpr184__pyx_v__pArray); __Pyx_XDECREF(__pyx_v_pArray); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13522 * self._pvt_ptr[0].frame.pArray[_idx] = _pArray * * @property # <<<<<<<<<<<<<< * def pPitch(self): * out_pPitch = [cudaPitchedPtr() for _pPitch in self._pvt_ptr[0].frame.pPitch] */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_6pPitch_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_6pPitch_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6pPitch___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6pPitch___get__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self) { PyObject *__pyx_v_out_pPitch = NULL; Py_ssize_t __pyx_v__idx; CYTHON_UNUSED struct cudaPitchedPtr __pyx_10genexpr185__pyx_v__pPitch; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = 
NULL; struct cudaPitchedPtr *__pyx_t_2; struct cudaPitchedPtr *__pyx_t_3; struct cudaPitchedPtr *__pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; size_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13524 * @property * def pPitch(self): * out_pPitch = [cudaPitchedPtr() for _pPitch in self._pvt_ptr[0].frame.pPitch] # <<<<<<<<<<<<<< * for _idx in range(len(out_pPitch)): * string.memcpy(out_pPitch[_idx].getPtr(), &self._pvt_ptr[0].frame.pPitch[_idx], sizeof(cyruntime.cudaPitchedPtr)) */ { /* enter inner scope */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13524, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((__pyx_v_self->_pvt_ptr[0]).frame.pPitch + 3); for (__pyx_t_4 = (__pyx_v_self->_pvt_ptr[0]).frame.pPitch; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_10genexpr185__pyx_v__pPitch = (__pyx_t_2[0]); __pyx_t_6 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaPitchedPtr); __pyx_t_7 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaPitchedPtr); __pyx_t_8 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_6, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13524, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(0, 13524, __pyx_L1_error) __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; } } /* exit inner scope */ __pyx_v_out_pPitch = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; 
/* "cuda/bindings/runtime.pyx":13525 * def pPitch(self): * out_pPitch = [cudaPitchedPtr() for _pPitch in self._pvt_ptr[0].frame.pPitch] * for _idx in range(len(out_pPitch)): # <<<<<<<<<<<<<< * string.memcpy(out_pPitch[_idx].getPtr(), &self._pvt_ptr[0].frame.pPitch[_idx], sizeof(cyruntime.cudaPitchedPtr)) * return out_pPitch */ __pyx_t_9 = __Pyx_PyList_GET_SIZE(__pyx_v_out_pPitch); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13525, __pyx_L1_error) __pyx_t_10 = __pyx_t_9; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v__idx = __pyx_t_11; /* "cuda/bindings/runtime.pyx":13526 * out_pPitch = [cudaPitchedPtr() for _pPitch in self._pvt_ptr[0].frame.pPitch] * for _idx in range(len(out_pPitch)): * string.memcpy(out_pPitch[_idx].getPtr(), &self._pvt_ptr[0].frame.pPitch[_idx], sizeof(cyruntime.cudaPitchedPtr)) # <<<<<<<<<<<<<< * return out_pPitch * @pPitch.setter */ __pyx_t_7 = __Pyx_GetItemInt_List(__pyx_v_out_pPitch, __pyx_v__idx, Py_ssize_t, 1, PyLong_FromSsize_t, 1, 1, 1, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __pyx_t_7; __Pyx_INCREF(__pyx_t_5); __pyx_t_8 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_5, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_12 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_12 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13526, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy(((struct cudaPitchedPtr *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_12)), (&((__pyx_v_self->_pvt_ptr[0]).frame.pPitch[__pyx_v__idx])), (sizeof(struct cudaPitchedPtr)))); } /* 
"cuda/bindings/runtime.pyx":13527 * for _idx in range(len(out_pPitch)): * string.memcpy(out_pPitch[_idx].getPtr(), &self._pvt_ptr[0].frame.pPitch[_idx], sizeof(cyruntime.cudaPitchedPtr)) * return out_pPitch # <<<<<<<<<<<<<< * @pPitch.setter * def pPitch(self, pPitch : list[cudaPitchedPtr]): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out_pPitch); __pyx_r = __pyx_v_out_pPitch; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13522 * self._pvt_ptr[0].frame.pArray[_idx] = _pArray * * @property # <<<<<<<<<<<<<< * def pPitch(self): * out_pPitch = [cudaPitchedPtr() for _pPitch in self._pvt_ptr[0].frame.pPitch] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.pPitch.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_out_pPitch); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13528 * string.memcpy(out_pPitch[_idx].getPtr(), &self._pvt_ptr[0].frame.pPitch[_idx], sizeof(cyruntime.cudaPitchedPtr)) * return out_pPitch * @pPitch.setter # <<<<<<<<<<<<<< * def pPitch(self, pPitch : list[cudaPitchedPtr]): * if len(pPitch) != 3: */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_6pPitch_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_pPitch); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_12anon_union11_6pPitch_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_pPitch) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pPitch), (&PyList_Type), 0, "pPitch", 2))) __PYX_ERR(0, 13529, 
__pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6pPitch_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self), ((PyObject*)__pyx_v_pPitch)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_6pPitch_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self, PyObject *__pyx_v_pPitch) { Py_ssize_t __pyx_v__idx; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; size_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13530 * @pPitch.setter * def pPitch(self, pPitch : list[cudaPitchedPtr]): * if len(pPitch) != 3: # <<<<<<<<<<<<<< * raise IndexError('not enough values found during array assignment, expected 3, got', len(pPitch)) * for _idx in range(len(pPitch)): */ __pyx_t_1 = __Pyx_PyList_GET_SIZE(__pyx_v_pPitch); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13530, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 3); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":13531 * def pPitch(self, pPitch : list[cudaPitchedPtr]): * if len(pPitch) != 3: * raise IndexError('not enough values found during array assignment, expected 3, got', len(pPitch)) # <<<<<<<<<<<<<< * for _idx in range(len(pPitch)): * string.memcpy(&self._pvt_ptr[0].frame.pPitch[_idx], pPitch[_idx].getPtr(), sizeof(cyruntime.cudaPitchedPtr)) */ __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_IndexError); __pyx_t_5 = __pyx_builtin_IndexError; __pyx_t_1 = __Pyx_PyList_GET_SIZE(__pyx_v_pPitch); if 
(unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13531, __pyx_L1_error) __pyx_t_6 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = 1; { PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_mstate_global->__pyx_kp_u_not_enough_values_found_during_a, __pyx_t_6}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13531, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 13531, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":13530 * @pPitch.setter * def pPitch(self, pPitch : list[cudaPitchedPtr]): * if len(pPitch) != 3: # <<<<<<<<<<<<<< * raise IndexError('not enough values found during array assignment, expected 3, got', len(pPitch)) * for _idx in range(len(pPitch)): */ } /* "cuda/bindings/runtime.pyx":13532 * if len(pPitch) != 3: * raise IndexError('not enough values found during array assignment, expected 3, got', len(pPitch)) * for _idx in range(len(pPitch)): # <<<<<<<<<<<<<< * string.memcpy(&self._pvt_ptr[0].frame.pPitch[_idx], pPitch[_idx].getPtr(), sizeof(cyruntime.cudaPitchedPtr)) * */ __pyx_t_1 = __Pyx_PyList_GET_SIZE(__pyx_v_pPitch); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13532, __pyx_L1_error) __pyx_t_8 = __pyx_t_1; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v__idx = __pyx_t_9; /* "cuda/bindings/runtime.pyx":13533 * raise IndexError('not enough values found during array assignment, expected 3, got', len(pPitch)) * for _idx in range(len(pPitch)): * string.memcpy(&self._pvt_ptr[0].frame.pPitch[_idx], pPitch[_idx].getPtr(), sizeof(cyruntime.cudaPitchedPtr)) # <<<<<<<<<<<<<< * * */ __pyx_t_6 = 
__Pyx_GetItemInt_List(__pyx_v_pPitch, __pyx_v__idx, Py_ssize_t, 1, PyLong_FromSsize_t, 1, 1, 1, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13533, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __pyx_t_6; __Pyx_INCREF(__pyx_t_5); __pyx_t_7 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_5, NULL}; __pyx_t_3 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_7, (1-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13533, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __pyx_t_10 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_3); if (unlikely((__pyx_t_10 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13533, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; (void)(memcpy((&((__pyx_v_self->_pvt_ptr[0]).frame.pPitch[__pyx_v__idx])), ((struct cudaPitchedPtr *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_10)), (sizeof(struct cudaPitchedPtr)))); } /* "cuda/bindings/runtime.pyx":13528 * string.memcpy(out_pPitch[_idx].getPtr(), &self._pvt_ptr[0].frame.pPitch[_idx], sizeof(cyruntime.cudaPitchedPtr)) * return out_pPitch * @pPitch.setter # <<<<<<<<<<<<<< * def pPitch(self, pPitch : list[cudaPitchedPtr]): * if len(pPitch) != 3: */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.pPitch.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject 
/* NOTE(review): Cython-generated code — do not hand-edit; regenerate from
 * cuda/bindings/runtime.pyx.  This span implements
 * anon_union11.__reduce_cython__(self): pickling is intentionally
 * unsupported because the type has a non-trivial __cinit__. */
*__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_12anon_union11_10__reduce_cython__, "anon_union11.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_12anon_union11_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_12anon_union11_10__reduce_cython__};
/* Python wrapper: rejects any positional or keyword arguments, then
 * delegates to the __pyx_pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* __reduce_cython__ takes no arguments beyond self. */
if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len < 0)) return NULL;
if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation: always raises TypeError — instances wrap a raw C pointer
 * and cannot be pickled. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union11_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2  raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */
__Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
__PYX_ERR(2, 2, __pyx_L1_error)
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3  anon_union11.__setstate_cython__ (continues below) */
/* Python wrapper */
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_12anon_union11_12__setstate_cython__, "anon_union11.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_12anon_union11_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_12anon_union11_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_12anon_union11_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12anon_union11_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static 
PyObject *__pyx_pf_4cuda_8bindings_7runtime_12anon_union11_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.anon_union11.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13565 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._val_ptr = calloc(1, sizeof(cyruntime.cudaEglFrame_st)) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = 
{0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13565, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13565, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13565, __pyx_L3_error) } else { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13565, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13565, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 13565, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) 
{ Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; struct __pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrame_st *__pyx_t_2; /* "cuda/bindings/runtime.pyx":13566 * """ * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._val_ptr = calloc(1, sizeof(cyruntime.cudaEglFrame_st)) * self._pvt_ptr = self._val_ptr */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13567 * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: * self._val_ptr = calloc(1, sizeof(cyruntime.cudaEglFrame_st)) # <<<<<<<<<<<<<< * self._pvt_ptr = self._val_ptr * else: */ __pyx_v_self->_val_ptr = ((struct __pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrame_st *)calloc(1, (sizeof(struct __pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrame_st)))); /* "cuda/bindings/runtime.pyx":13568 * if _ptr == 0: * self._val_ptr = calloc(1, sizeof(cyruntime.cudaEglFrame_st)) * self._pvt_ptr = self._val_ptr # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_t_2 = __pyx_v_self->_val_ptr; __pyx_v_self->_pvt_ptr = __pyx_t_2; /* "cuda/bindings/runtime.pyx":13566 * """ * def __cinit__(self, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._val_ptr = calloc(1, 
sizeof(cyruntime.cudaEglFrame_st)) * self._pvt_ptr = self._val_ptr */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13570 * self._pvt_ptr = self._val_ptr * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * def __init__(self, void_ptr _ptr = 0): * pass */ /*else*/ { __pyx_v_self->_pvt_ptr = ((struct __pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrame_st *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13565 * Get memory address of class instance * """ * def __cinit__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._val_ptr = calloc(1, sizeof(cyruntime.cudaEglFrame_st)) */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13571 * else: * self._pvt_ptr = _ptr * def __init__(self, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * pass * self._frame = anon_union11(_ptr=self._pvt_ptr) */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13571, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13571, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__init__", 0) < (0)) __PYX_ERR(0, 13571, __pyx_L3_error) } else { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13571, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13571, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, __pyx_nargs); __PYX_ERR(0, 13571, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_2__init__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self), __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { 
/* Tail of the generated __init__ argument-unpacking wrapper (its head is above):
 * decref any parsed argument objects, close the RefNanny scope, and return. */
Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }

/* Cython-generated implementation of cudaEglFrame_st.__init__ (runtime.pyx:13571-13573).
 * MACHINE-GENERATED by Cython — do not hand-edit; change the .pyx source instead.
 *
 * Behavior visible here: builds a Python int from self->_pvt_ptr, then calls the
 * anon_union11 type with the single keyword argument _ptr=<that int> via the
 * vectorcall builder, and stores the resulting instance into self->_frame
 * (incref of the new value via GIVEREF, decref of the old one).
 * Returns 0 on success, -1 after adding a traceback on error.
 * NOTE(review): __pyx_v__ptr is CYTHON_UNUSED — the __init__ body ignores the
 * _ptr argument; only self->_pvt_ptr (set elsewhere, presumably __cinit__) is used. */
static int __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_2__init__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self, CYTHON_UNUSED __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  /* Temporaries: _t_1 result object, _t_2 unused self slot for vectorcall,
   * _t_3 the anon_union11 type, _t_4 the _ptr int, _t_6 the kwnames builder. */
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__init__", 0);

  /* "cuda/bindings/runtime.pyx":13573
   *     def __init__(self, void_ptr _ptr = 0):
   *         pass
   *         self._frame = anon_union11(_ptr=self._pvt_ptr)             # <<<<<<<<<<<<<<
   *     def __dealloc__(self):
   *         if self._val_ptr is not NULL:
   */
  __pyx_t_2 = NULL;
  /* Callable: the anon_union11 extension type object (owned reference taken). */
  __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_union11);
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_union11);
  /* Convert the raw _pvt_ptr address to a Python int for the _ptr= keyword. */
  __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13573, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = 1;
  {
    /* Vectorcall with 0 positional args and 1 keyword arg (_ptr). */
    PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_2, NULL};
    __pyx_t_6 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13573, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_6);
    if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_4, __pyx_t_6, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 13573, __pyx_L1_error)
    __pyx_t_1 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_6);
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13573, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  /* Swap the new anon_union11 instance into self->_frame (old value decref'd). */
  __Pyx_GIVEREF((PyObject *)__pyx_t_1);
  __Pyx_GOTREF((PyObject *)__pyx_v_self->_frame);
  __Pyx_DECREF((PyObject *)__pyx_v_self->_frame);
  __pyx_v_self->_frame = ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/runtime.pyx":13571
   *         else:
   *             self._pvt_ptr = _ptr
   *     def __init__(self, void_ptr _ptr = 0):             # <<<<<<<<<<<<<<
   *         pass
   *         self._frame = anon_union11(_ptr=self._pvt_ptr)
   */

  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: release all temporaries and report via traceback, return -1. */
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13574
 *         pass
 *         self._frame = anon_union11(_ptr=self._pvt_ptr)
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         if self._val_ptr is not NULL:
 *             free(self._val_ptr)
 */

/* Python wrapper for cudaEglFrame_st.__dealloc__ (body continues below). */
static void __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_5__dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_5__dealloc__(PyObject *__pyx_v_self) {
  /* Body of the __dealloc__ Python wrapper: no arguments to parse, just
   * delegate to the generated implementation below.
   * NOTE(review): __pyx_args / __pyx_nargs are not parameters of this
   * function; this presumably compiles only because __Pyx_KwValues_VARARGS
   * is a macro that discards its arguments — generated code, do not hand-edit. */
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_4__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* Cython-generated implementation of cudaEglFrame_st.__dealloc__
 * (runtime.pyx:13574-13576): frees the heap block behind self->_val_ptr,
 * if any, when the Python object is destroyed. MACHINE-GENERATED — do not
 * hand-edit; change the .pyx source instead. */
static void __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_4__dealloc__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self) {
  int __pyx_t_1;

  /* "cuda/bindings/runtime.pyx":13575
   *         self._frame = anon_union11(_ptr=self._pvt_ptr)
   *     def __dealloc__(self):
   *         if self._val_ptr is not NULL:             # <<<<<<<<<<<<<<
   *             free(self._val_ptr)
   *     def getPtr(self):
   */
  __pyx_t_1 = (__pyx_v_self->_val_ptr != NULL);
  if (__pyx_t_1) {

    /* "cuda/bindings/runtime.pyx":13576
     *     def __dealloc__(self):
     *         if self._val_ptr is not NULL:
     *             free(self._val_ptr)             # <<<<<<<<<<<<<<
     *     def getPtr(self):
     *         return self._pvt_ptr
     */
    free(__pyx_v_self->_val_ptr);

    /* "cuda/bindings/runtime.pyx":13575
     *         self._frame = anon_union11(_ptr=self._pvt_ptr)
     *     def __dealloc__(self):
     *         if self._val_ptr is not NULL:             # <<<<<<<<<<<<<<
     *             free(self._val_ptr)
     *     def getPtr(self):
     */
  }

  /* "cuda/bindings/runtime.pyx":13574
   *         pass
   *         self._frame = anon_union11(_ptr=self._pvt_ptr)
   *     def __dealloc__(self):             # <<<<<<<<<<<<<<
   *         if self._val_ptr is not NULL:
   *             free(self._val_ptr)
   */

  /* function exit code */
}

/* "cuda/bindings/runtime.pyx":13577
 *         if self._val_ptr is not NULL:
 *             free(self._val_ptr)
 *     def getPtr(self):             # <<<<<<<<<<<<<<
 *         return self._pvt_ptr
 *     def __repr__(self):
 */

/* Python wrapper prototype for cudaEglFrame_st.getPtr; the METH_FASTCALL and
 * tuple-args calling conventions are selected at compile time. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_7getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Docstring and method-table entry for cudaEglFrame_st.getPtr.
 * MACHINE-GENERATED by Cython — do not hand-edit; change the .pyx source. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_15cudaEglFrame_st_6getPtr, "cudaEglFrame_st.getPtr(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_15cudaEglFrame_st_7getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_7getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_15cudaEglFrame_st_6getPtr};

/* Python wrapper: rejects any positional or keyword arguments, then calls the
 * implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_7getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("getPtr (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
  /* Tuple-args fallback: recover nargs from the args tuple. */
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* getPtr(self) takes no arguments: refuse positionals... */
  if (unlikely(__pyx_nargs > 0)) {
    __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL;
  }
  /* ...and refuse keywords. */
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_6getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of cudaEglFrame_st.getPtr (runtime.pyx:13577-13578):
 * returns self->_pvt_ptr — the raw struct address — as a Python int
 * (converted through unsigned PY_LONG_LONG). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_6getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("getPtr", 0);

  /* "cuda/bindings/runtime.pyx":13578
   *             free(self._val_ptr)
   *     def getPtr(self):
   *         return self._pvt_ptr             # <<<<<<<<<<<<<<
   *     def __repr__(self):
   *         if self._pvt_ptr is not NULL:
   */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13578, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/runtime.pyx":13577
   *         if self._val_ptr is not NULL:
   *             free(self._val_ptr)
   *     def getPtr(self):             # <<<<<<<<<<<<<<
   *         return self._pvt_ptr
   *     def __repr__(self):
   */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13579
 *     def getPtr(self):
 *         return self._pvt_ptr
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         if self._pvt_ptr is not NULL:
 *             str_list = []
 */

/* Python wrapper for cudaEglFrame_st.__repr__ (definition continues below). */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_8__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_8__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self) { PyObject *__pyx_v_str_list = NULL; PyObject *__pyx_10genexpr186__pyx_v_line = NULL; PyObject *__pyx_10genexpr187__pyx_v_line = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13580 * return self._pvt_ptr * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ __pyx_t_1 = (__pyx_v_self->_pvt_ptr != NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13581 * def __repr__(self): * if self._pvt_ptr is not NULL: * str_list = [] # <<<<<<<<<<<<<< * try: * str_list += ['frame :\n' + '\n'.join([' ' + line for line in str(self.frame).splitlines()])] */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13581, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_str_list = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13582 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< 
* str_list += ['frame :\n' + '\n'.join([' ' + line for line in str(self.frame).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13583 * str_list = [] * try: * str_list += ['frame :\n' + '\n'.join([' ' + line for line in str(self.frame).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['frame : '] */ { /* enter inner scope */ __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13583, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_frame_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13583, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13583, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyUnicode_Splitlines(((PyObject*)__pyx_t_7), 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13583, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __pyx_t_6; __Pyx_INCREF(__pyx_t_7); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_7); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13583, __pyx_L12_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } __pyx_t_6 = __Pyx_PyList_GetItemRef(__pyx_t_7, __pyx_t_8); ++__pyx_t_8; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13583, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_10genexpr186__pyx_v_line, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr186__pyx_v_line); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13583, __pyx_L12_error) __Pyx_GOTREF(__pyx_t_6); if 
(unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 13583, __pyx_L12_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_10genexpr186__pyx_v_line); __pyx_10genexpr186__pyx_v_line = 0; goto __pyx_L16_exit_scope; __pyx_L12_error:; __Pyx_XDECREF(__pyx_10genexpr186__pyx_v_line); __pyx_10genexpr186__pyx_v_line = 0; goto __pyx_L4_error; __pyx_L16_exit_scope:; } /* exit inner scope */ __pyx_t_7 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13583, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_frame, __pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13583, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyList_New(1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13583, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyList_SET_ITEM(__pyx_t_7, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 13583, __pyx_L4_error); __pyx_t_2 = 0; __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13583, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_2)); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":13582 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['frame :\n' + '\n'.join([' ' + line for line in str(self.frame).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "cuda/bindings/runtime.pyx":13584 * try: * str_list += ['frame :\n' + '\n'.join([' ' 
+ line for line in str(self.frame).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['frame : '] * try: */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_9) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_7, &__pyx_t_6) < 0) __PYX_ERR(0, 13584, __pyx_L6_except_error) __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":13585 * str_list += ['frame :\n' + '\n'.join([' ' + line for line in str(self.frame).splitlines()])] * except ValueError: * str_list += ['frame : '] # <<<<<<<<<<<<<< * try: * str_list += ['planeDesc :\n' + '\n'.join([' ' + line for line in str(self.planeDesc).splitlines()])] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13585, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_frame_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_frame_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_frame_ValueError) != (0)) __PYX_ERR(0, 13585, __pyx_L6_except_error); __pyx_t_11 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 13585, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_11)); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; /* "cuda/bindings/runtime.pyx":13582 * if self._pvt_ptr is not NULL: * str_list = [] * try: # <<<<<<<<<<<<<< * str_list += ['frame :\n' + '\n'.join([' ' + line for line in str(self.frame).splitlines()])] * except ValueError: */ __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); 
__Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L9_try_end:; } /* "cuda/bindings/runtime.pyx":13586 * except ValueError: * str_list += ['frame : '] * try: # <<<<<<<<<<<<<< * str_list += ['planeDesc :\n' + '\n'.join([' ' + line for line in str(self.planeDesc).splitlines()])] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13587 * str_list += ['frame : '] * try: * str_list += ['planeDesc :\n' + '\n'.join([' ' + line for line in str(self.planeDesc).splitlines()])] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['planeDesc : '] */ { /* enter inner scope */ __pyx_t_6 = PyList_New(0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13587, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_planeDesc_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13587, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13587, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyUnicode_Splitlines(((PyObject*)__pyx_t_2), 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13587, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_t_7; __Pyx_INCREF(__pyx_t_2); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_2); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 13587, __pyx_L27_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } 
__pyx_t_7 = __Pyx_PyList_GetItemRef(__pyx_t_2, __pyx_t_8); ++__pyx_t_8; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13587, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XDECREF_SET(__pyx_10genexpr187__pyx_v_line, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = PyNumber_Add(__pyx_mstate_global->__pyx_kp_u__4, __pyx_10genexpr187__pyx_v_line); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13587, __pyx_L27_error) __Pyx_GOTREF(__pyx_t_7); if (unlikely(__Pyx_ListComp_Append(__pyx_t_6, (PyObject*)__pyx_t_7))) __PYX_ERR(0, 13587, __pyx_L27_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_10genexpr187__pyx_v_line); __pyx_10genexpr187__pyx_v_line = 0; goto __pyx_L31_exit_scope; __pyx_L27_error:; __Pyx_XDECREF(__pyx_10genexpr187__pyx_v_line); __pyx_10genexpr187__pyx_v_line = 0; goto __pyx_L19_error; __pyx_L31_exit_scope:; } /* exit inner scope */ __pyx_t_2 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13587, __pyx_L19_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_planeDesc, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13587, __pyx_L19_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13587, __pyx_L19_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 13587, __pyx_L19_error); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13587, __pyx_L19_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_6)); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":13586 * except ValueError: * str_list += ['frame : '] * try: # <<<<<<<<<<<<<< * str_list += ['planeDesc :\n' + '\n'.join([' ' + line for 
line in str(self.planeDesc).splitlines()])] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L24_try_end; __pyx_L19_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "cuda/bindings/runtime.pyx":13588 * try: * str_list += ['planeDesc :\n' + '\n'.join([' ' + line for line in str(self.planeDesc).splitlines()])] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['planeDesc : '] * try: */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_9) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_2, &__pyx_t_7) < 0) __PYX_ERR(0, 13588, __pyx_L21_except_error) __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_7); /* "cuda/bindings/runtime.pyx":13589 * str_list += ['planeDesc :\n' + '\n'.join([' ' + line for line in str(self.planeDesc).splitlines()])] * except ValueError: * str_list += ['planeDesc : '] # <<<<<<<<<<<<<< * try: * str_list += ['planeCount : ' + str(self.planeCount)] */ __pyx_t_11 = PyList_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 13589, __pyx_L21_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_planeDesc_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_planeDesc_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 0, __pyx_mstate_global->__pyx_kp_u_planeDesc_ValueError) != (0)) __PYX_ERR(0, 13589, __pyx_L21_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_11); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13589, __pyx_L21_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, 
((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L20_exception_handled; } goto __pyx_L21_except_error; /* "cuda/bindings/runtime.pyx":13586 * except ValueError: * str_list += ['frame : '] * try: # <<<<<<<<<<<<<< * str_list += ['planeDesc :\n' + '\n'.join([' ' + line for line in str(self.planeDesc).splitlines()])] * except ValueError: */ __pyx_L21_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L20_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L24_try_end:; } /* "cuda/bindings/runtime.pyx":13590 * except ValueError: * str_list += ['planeDesc : '] * try: # <<<<<<<<<<<<<< * str_list += ['planeCount : ' + str(self.planeCount)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13591 * str_list += ['planeDesc : '] * try: * str_list += ['planeCount : ' + str(self.planeCount)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['planeCount : '] */ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_planeCount_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13591, __pyx_L34_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13591, __pyx_L34_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_planeCount, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13591, __pyx_L34_error) __Pyx_GOTREF(__pyx_t_7); 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13591, __pyx_L34_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_7); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 13591, __pyx_L34_error); __pyx_t_7 = 0; __pyx_t_7 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13591, __pyx_L34_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_7)); __pyx_t_7 = 0; /* "cuda/bindings/runtime.pyx":13590 * except ValueError: * str_list += ['planeDesc : '] * try: # <<<<<<<<<<<<<< * str_list += ['planeCount : ' + str(self.planeCount)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L39_try_end; __pyx_L34_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "cuda/bindings/runtime.pyx":13592 * try: * str_list += ['planeCount : ' + str(self.planeCount)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['planeCount : '] * try: */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_9) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 13592, __pyx_L36_except_error) __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":13593 * str_list += ['planeCount : ' + str(self.planeCount)] * except ValueError: * str_list += ['planeCount : '] # <<<<<<<<<<<<<< * try: * str_list += ['frameType : ' + str(self.frameType)] */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13593, 
__pyx_L36_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_planeCount_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_planeCount_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_planeCount_ValueError) != (0)) __PYX_ERR(0, 13593, __pyx_L36_except_error); __pyx_t_11 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 13593, __pyx_L36_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_11)); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L35_exception_handled; } goto __pyx_L36_except_error; /* "cuda/bindings/runtime.pyx":13590 * except ValueError: * str_list += ['planeDesc : '] * try: # <<<<<<<<<<<<<< * str_list += ['planeCount : ' + str(self.planeCount)] * except ValueError: */ __pyx_L36_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L35_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L39_try_end:; } /* "cuda/bindings/runtime.pyx":13594 * except ValueError: * str_list += ['planeCount : '] * try: # <<<<<<<<<<<<<< * str_list += ['frameType : ' + str(self.frameType)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_4, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_5); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "cuda/bindings/runtime.pyx":13595 * str_list += ['planeCount : '] * try: * str_list += ['frameType : ' + str(self.frameType)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['frameType : '] */ __pyx_t_6 = 
__Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_frameType_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13595, __pyx_L42_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13595, __pyx_L42_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_frameType, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13595, __pyx_L42_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13595, __pyx_L42_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 13595, __pyx_L42_error); __pyx_t_6 = 0; __pyx_t_6 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13595, __pyx_L42_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_6)); __pyx_t_6 = 0; /* "cuda/bindings/runtime.pyx":13594 * except ValueError: * str_list += ['planeCount : '] * try: # <<<<<<<<<<<<<< * str_list += ['frameType : ' + str(self.frameType)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L47_try_end; __pyx_L42_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "cuda/bindings/runtime.pyx":13596 * try: * str_list += ['frameType : ' + str(self.frameType)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['frameType : '] * try: */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_9) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__repr__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_2, &__pyx_t_7) < 0) __PYX_ERR(0, 13596, __pyx_L44_except_error) __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_7); /* "cuda/bindings/runtime.pyx":13597 * str_list += ['frameType : ' + str(self.frameType)] * except ValueError: * str_list += ['frameType : '] # <<<<<<<<<<<<<< * try: * str_list += ['eglColorFormat : ' + str(self.eglColorFormat)] */ __pyx_t_11 = PyList_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 13597, __pyx_L44_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_frameType_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_frameType_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_11, 0, __pyx_mstate_global->__pyx_kp_u_frameType_ValueError) != (0)) __PYX_ERR(0, 13597, __pyx_L44_except_error); __pyx_t_10 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_11); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13597, __pyx_L44_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_10)); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L43_exception_handled; } goto __pyx_L44_except_error; /* "cuda/bindings/runtime.pyx":13594 * except ValueError: * str_list += ['planeCount : '] * try: # <<<<<<<<<<<<<< * str_list += ['frameType : ' + str(self.frameType)] * except ValueError: */ __pyx_L44_except_error:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); goto __pyx_L1_error; __pyx_L43_exception_handled:; __Pyx_XGIVEREF(__pyx_t_5); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_4, __pyx_t_3); __pyx_L47_try_end:; } /* "cuda/bindings/runtime.pyx":13598 * except ValueError: * str_list += ['frameType : '] * try: 
# <<<<<<<<<<<<<< * str_list += ['eglColorFormat : ' + str(self.eglColorFormat)] * except ValueError: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "cuda/bindings/runtime.pyx":13599 * str_list += ['frameType : '] * try: * str_list += ['eglColorFormat : ' + str(self.eglColorFormat)] # <<<<<<<<<<<<<< * except ValueError: * str_list += ['eglColorFormat : '] */ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_mstate_global->__pyx_n_u_eglColorFormat_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13599, __pyx_L50_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_7); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13599, __pyx_L50_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_eglColorFormat, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13599, __pyx_L50_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13599, __pyx_L50_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_7); if (__Pyx_PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_7) != (0)) __PYX_ERR(0, 13599, __pyx_L50_error); __pyx_t_7 = 0; __pyx_t_7 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 13599, __pyx_L50_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_7)); __pyx_t_7 = 0; /* "cuda/bindings/runtime.pyx":13598 * except ValueError: * str_list += ['frameType : '] * try: # <<<<<<<<<<<<<< * str_list += ['eglColorFormat : ' + str(self.eglColorFormat)] * except ValueError: */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L55_try_end; 
__pyx_L50_error:; __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "cuda/bindings/runtime.pyx":13600 * try: * str_list += ['eglColorFormat : ' + str(self.eglColorFormat)] * except ValueError: # <<<<<<<<<<<<<< * str_list += ['eglColorFormat : '] * return '\n'.join(str_list) */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_ValueError); if (__pyx_t_9) { __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_2, &__pyx_t_6) < 0) __PYX_ERR(0, 13600, __pyx_L52_except_error) __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_6); /* "cuda/bindings/runtime.pyx":13601 * str_list += ['eglColorFormat : ' + str(self.eglColorFormat)] * except ValueError: * str_list += ['eglColorFormat : '] # <<<<<<<<<<<<<< * return '\n'.join(str_list) * else: */ __pyx_t_10 = PyList_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 13601, __pyx_L52_except_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u_eglColorFormat_ValueError); __Pyx_GIVEREF(__pyx_mstate_global->__pyx_kp_u_eglColorFormat_ValueError); if (__Pyx_PyList_SET_ITEM(__pyx_t_10, 0, __pyx_mstate_global->__pyx_kp_u_eglColorFormat_ValueError) != (0)) __PYX_ERR(0, 13601, __pyx_L52_except_error); __pyx_t_11 = PyNumber_InPlaceAdd(__pyx_v_str_list, __pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 13601, __pyx_L52_except_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF_SET(__pyx_v_str_list, ((PyObject*)__pyx_t_11)); __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L51_exception_handled; } goto __pyx_L52_except_error; /* "cuda/bindings/runtime.pyx":13598 * except ValueError: * 
str_list += ['frameType : '] * try: # <<<<<<<<<<<<<< * str_list += ['eglColorFormat : ' + str(self.eglColorFormat)] * except ValueError: */ __pyx_L52_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L51_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L55_try_end:; } /* "cuda/bindings/runtime.pyx":13602 * except ValueError: * str_list += ['eglColorFormat : '] * return '\n'.join(str_list) # <<<<<<<<<<<<<< * else: * return '' */ __Pyx_XDECREF(__pyx_r); __pyx_t_6 = PyUnicode_Join(__pyx_mstate_global->__pyx_kp_u__2, __pyx_v_str_list); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13580 * return self._pvt_ptr * def __repr__(self): * if self._pvt_ptr is not NULL: # <<<<<<<<<<<<<< * str_list = [] * try: */ } /* "cuda/bindings/runtime.pyx":13604 * return '\n'.join(str_list) * else: * return '' # <<<<<<<<<<<<<< * @property * def frame(self): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_mstate_global->__pyx_kp_u__3); __pyx_r = __pyx_mstate_global->__pyx_kp_u__3; goto __pyx_L0; } /* "cuda/bindings/runtime.pyx":13579 * def getPtr(self): * return self._pvt_ptr * def __repr__(self): # <<<<<<<<<<<<<< * if self._pvt_ptr is not NULL: * str_list = [] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_str_list); __Pyx_XDECREF(__pyx_10genexpr186__pyx_v_line); __Pyx_XDECREF(__pyx_10genexpr187__pyx_v_line); __Pyx_XGIVEREF(__pyx_r); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13605 * else: * return '' * @property # <<<<<<<<<<<<<< * def frame(self): * return self._frame */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_5frame_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_5frame_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_5frame___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_5frame___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13607 * @property * def frame(self): * return self._frame # <<<<<<<<<<<<<< * @frame.setter * def frame(self, frame not None : anon_union11): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF((PyObject *)__pyx_v_self->_frame); __pyx_r = ((PyObject *)__pyx_v_self->_frame); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13605 * else: * return '' * @property # <<<<<<<<<<<<<< * def frame(self): * return self._frame */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13608 * def frame(self): * return self._frame * @frame.setter # <<<<<<<<<<<<<< * def frame(self, frame not None : anon_union11): * string.memcpy(&self._pvt_ptr[0].frame, frame.getPtr(), sizeof(self._pvt_ptr[0].frame)) */ /* Python wrapper */ static int 
__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_5frame_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_frame); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_5frame_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_frame) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_frame), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_anon_union11, 0, "frame", 0))) __PYX_ERR(0, 13609, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_5frame_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self), ((struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *)__pyx_v_frame)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_5frame_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self, struct __pyx_obj_4cuda_8bindings_7runtime_anon_union11 *__pyx_v_frame) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13610 * @frame.setter * def frame(self, frame not None : anon_union11): * string.memcpy(&self._pvt_ptr[0].frame, frame.getPtr(), sizeof(self._pvt_ptr[0].frame)) # <<<<<<<<<<<<<< * @property * def planeDesc(self): */ __pyx_t_2 = ((PyObject *)__pyx_v_frame); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject 
*__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13610, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_4 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_4 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13610, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy((&(__pyx_v_self->_pvt_ptr[0]).frame), ((union __pyx_t_4cuda_8bindings_9cyruntime_anon_union11 *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_4)), (sizeof((__pyx_v_self->_pvt_ptr[0]).frame)))); /* "cuda/bindings/runtime.pyx":13608 * def frame(self): * return self._frame * @frame.setter # <<<<<<<<<<<<<< * def frame(self, frame not None : anon_union11): * string.memcpy(&self._pvt_ptr[0].frame, frame.getPtr(), sizeof(self._pvt_ptr[0].frame)) */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.frame.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13611 * def frame(self, frame not None : anon_union11): * string.memcpy(&self._pvt_ptr[0].frame, frame.getPtr(), sizeof(self._pvt_ptr[0].frame)) * @property # <<<<<<<<<<<<<< * def planeDesc(self): * out_planeDesc = [cudaEglPlaneDesc() for _planeDesc in self._pvt_ptr[0].planeDesc] */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9planeDesc_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9planeDesc_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_9planeDesc___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_9planeDesc___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self) { PyObject *__pyx_v_out_planeDesc = NULL; Py_ssize_t __pyx_v__idx; CYTHON_UNUSED __pyx_t_4cuda_8bindings_9cyruntime_cudaEglPlaneDesc __pyx_10genexpr188__pyx_v__planeDesc; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __pyx_t_4cuda_8bindings_9cyruntime_cudaEglPlaneDesc *__pyx_t_2; __pyx_t_4cuda_8bindings_9cyruntime_cudaEglPlaneDesc *__pyx_t_3; __pyx_t_4cuda_8bindings_9cyruntime_cudaEglPlaneDesc *__pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; size_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13613 * @property * def planeDesc(self): * out_planeDesc = [cudaEglPlaneDesc() for _planeDesc in self._pvt_ptr[0].planeDesc] # <<<<<<<<<<<<<< * for _idx in range(len(out_planeDesc)): * string.memcpy(out_planeDesc[_idx].getPtr(), &self._pvt_ptr[0].planeDesc[_idx], sizeof(cyruntime.cudaEglPlaneDesc)) */ { /* enter inner scope */ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((__pyx_v_self->_pvt_ptr[0]).planeDesc + 3); for (__pyx_t_4 = (__pyx_v_self->_pvt_ptr[0]).planeDesc; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; 
__pyx_10genexpr188__pyx_v__planeDesc = (__pyx_t_2[0]); __pyx_t_6 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEglPlaneDesc); __pyx_t_7 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEglPlaneDesc); __pyx_t_8 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_6, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 13613, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(0, 13613, __pyx_L1_error) __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; } } /* exit inner scope */ __pyx_v_out_planeDesc = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":13614 * def planeDesc(self): * out_planeDesc = [cudaEglPlaneDesc() for _planeDesc in self._pvt_ptr[0].planeDesc] * for _idx in range(len(out_planeDesc)): # <<<<<<<<<<<<<< * string.memcpy(out_planeDesc[_idx].getPtr(), &self._pvt_ptr[0].planeDesc[_idx], sizeof(cyruntime.cudaEglPlaneDesc)) * return out_planeDesc */ __pyx_t_9 = __Pyx_PyList_GET_SIZE(__pyx_v_out_planeDesc); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13614, __pyx_L1_error) __pyx_t_10 = __pyx_t_9; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_v__idx = __pyx_t_11; /* "cuda/bindings/runtime.pyx":13615 * out_planeDesc = [cudaEglPlaneDesc() for _planeDesc in self._pvt_ptr[0].planeDesc] * for _idx in range(len(out_planeDesc)): * string.memcpy(out_planeDesc[_idx].getPtr(), &self._pvt_ptr[0].planeDesc[_idx], sizeof(cyruntime.cudaEglPlaneDesc)) # <<<<<<<<<<<<<< * return out_planeDesc * @planeDesc.setter */ __pyx_t_7 = __Pyx_GetItemInt_List(__pyx_v_out_planeDesc, __pyx_v__idx, Py_ssize_t, 1, PyLong_FromSsize_t, 1, 1, 1, 1); if (unlikely(!__pyx_t_7)) 
__PYX_ERR(0, 13615, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_5 = __pyx_t_7; __Pyx_INCREF(__pyx_t_5); __pyx_t_8 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_5, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_8, (1-__pyx_t_8) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13615, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_12 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_12 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13615, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (void)(memcpy(((__pyx_t_4cuda_8bindings_9cyruntime_cudaEglPlaneDesc *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_12)), (&((__pyx_v_self->_pvt_ptr[0]).planeDesc[__pyx_v__idx])), (sizeof(__pyx_t_4cuda_8bindings_9cyruntime_cudaEglPlaneDesc)))); } /* "cuda/bindings/runtime.pyx":13616 * for _idx in range(len(out_planeDesc)): * string.memcpy(out_planeDesc[_idx].getPtr(), &self._pvt_ptr[0].planeDesc[_idx], sizeof(cyruntime.cudaEglPlaneDesc)) * return out_planeDesc # <<<<<<<<<<<<<< * @planeDesc.setter * def planeDesc(self, planeDesc : list[cudaEglPlaneDesc]): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_out_planeDesc); __pyx_r = __pyx_v_out_planeDesc; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13611 * def frame(self, frame not None : anon_union11): * string.memcpy(&self._pvt_ptr[0].frame, frame.getPtr(), sizeof(self._pvt_ptr[0].frame)) * @property # <<<<<<<<<<<<<< * def planeDesc(self): * out_planeDesc = [cudaEglPlaneDesc() for _planeDesc in self._pvt_ptr[0].planeDesc] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.planeDesc.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r 
= NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_out_planeDesc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13617 * string.memcpy(out_planeDesc[_idx].getPtr(), &self._pvt_ptr[0].planeDesc[_idx], sizeof(cyruntime.cudaEglPlaneDesc)) * return out_planeDesc * @planeDesc.setter # <<<<<<<<<<<<<< * def planeDesc(self, planeDesc : list[cudaEglPlaneDesc]): * if len(planeDesc) != 3: */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9planeDesc_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_planeDesc); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9planeDesc_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_planeDesc) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_planeDesc), (&PyList_Type), 0, "planeDesc", 2))) __PYX_ERR(0, 13618, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_9planeDesc_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self), ((PyObject*)__pyx_v_planeDesc)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_9planeDesc_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self, PyObject *__pyx_v_planeDesc) { Py_ssize_t __pyx_v__idx; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; size_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; 
__pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13619 * @planeDesc.setter * def planeDesc(self, planeDesc : list[cudaEglPlaneDesc]): * if len(planeDesc) != 3: # <<<<<<<<<<<<<< * raise IndexError('not enough values found during array assignment, expected 3, got', len(planeDesc)) * for _idx in range(len(planeDesc)): */ __pyx_t_1 = __Pyx_PyList_GET_SIZE(__pyx_v_planeDesc); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13619, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 3); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":13620 * def planeDesc(self, planeDesc : list[cudaEglPlaneDesc]): * if len(planeDesc) != 3: * raise IndexError('not enough values found during array assignment, expected 3, got', len(planeDesc)) # <<<<<<<<<<<<<< * for _idx in range(len(planeDesc)): * string.memcpy(&self._pvt_ptr[0].planeDesc[_idx], planeDesc[_idx].getPtr(), sizeof(cyruntime.cudaEglPlaneDesc)) */ __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_IndexError); __pyx_t_5 = __pyx_builtin_IndexError; __pyx_t_1 = __Pyx_PyList_GET_SIZE(__pyx_v_planeDesc); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13620, __pyx_L1_error) __pyx_t_6 = PyLong_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = 1; { PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_mstate_global->__pyx_kp_u_not_enough_values_found_during_a, __pyx_t_6}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_7, (3-__pyx_t_7) | (__pyx_t_7*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 
13620, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":13619 * @planeDesc.setter * def planeDesc(self, planeDesc : list[cudaEglPlaneDesc]): * if len(planeDesc) != 3: # <<<<<<<<<<<<<< * raise IndexError('not enough values found during array assignment, expected 3, got', len(planeDesc)) * for _idx in range(len(planeDesc)): */ } /* "cuda/bindings/runtime.pyx":13621 * if len(planeDesc) != 3: * raise IndexError('not enough values found during array assignment, expected 3, got', len(planeDesc)) * for _idx in range(len(planeDesc)): # <<<<<<<<<<<<<< * string.memcpy(&self._pvt_ptr[0].planeDesc[_idx], planeDesc[_idx].getPtr(), sizeof(cyruntime.cudaEglPlaneDesc)) * */ __pyx_t_1 = __Pyx_PyList_GET_SIZE(__pyx_v_planeDesc); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 13621, __pyx_L1_error) __pyx_t_8 = __pyx_t_1; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v__idx = __pyx_t_9; /* "cuda/bindings/runtime.pyx":13622 * raise IndexError('not enough values found during array assignment, expected 3, got', len(planeDesc)) * for _idx in range(len(planeDesc)): * string.memcpy(&self._pvt_ptr[0].planeDesc[_idx], planeDesc[_idx].getPtr(), sizeof(cyruntime.cudaEglPlaneDesc)) # <<<<<<<<<<<<<< * * @property */ __pyx_t_6 = __Pyx_GetItemInt_List(__pyx_v_planeDesc, __pyx_v__idx, Py_ssize_t, 1, PyLong_FromSsize_t, 1, 1, 1, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 13622, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = __pyx_t_6; __Pyx_INCREF(__pyx_t_5); __pyx_t_7 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_5, NULL}; __pyx_t_3 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_7, (1-__pyx_t_7) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13622, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __pyx_t_10 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_3); if (unlikely((__pyx_t_10 == (unsigned 
PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13622, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; (void)(memcpy((&((__pyx_v_self->_pvt_ptr[0]).planeDesc[__pyx_v__idx])), ((__pyx_t_4cuda_8bindings_9cyruntime_cudaEglPlaneDesc *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_10)), (sizeof(__pyx_t_4cuda_8bindings_9cyruntime_cudaEglPlaneDesc)))); } /* "cuda/bindings/runtime.pyx":13617 * string.memcpy(out_planeDesc[_idx].getPtr(), &self._pvt_ptr[0].planeDesc[_idx], sizeof(cyruntime.cudaEglPlaneDesc)) * return out_planeDesc * @planeDesc.setter # <<<<<<<<<<<<<< * def planeDesc(self, planeDesc : list[cudaEglPlaneDesc]): * if len(planeDesc) != 3: */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.planeDesc.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13624 * string.memcpy(&self._pvt_ptr[0].planeDesc[_idx], planeDesc[_idx].getPtr(), sizeof(cyruntime.cudaEglPlaneDesc)) * * @property # <<<<<<<<<<<<<< * def planeCount(self): * return self._pvt_ptr[0].planeCount */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_10planeCount_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_10planeCount_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_10planeCount___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; 
} static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_10planeCount___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13626 * @property * def planeCount(self): * return self._pvt_ptr[0].planeCount # <<<<<<<<<<<<<< * @planeCount.setter * def planeCount(self, unsigned int planeCount): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int((__pyx_v_self->_pvt_ptr[0]).planeCount); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13626, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13624 * string.memcpy(&self._pvt_ptr[0].planeDesc[_idx], planeDesc[_idx].getPtr(), sizeof(cyruntime.cudaEglPlaneDesc)) * * @property # <<<<<<<<<<<<<< * def planeCount(self): * return self._pvt_ptr[0].planeCount */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.planeCount.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13627 * def planeCount(self): * return self._pvt_ptr[0].planeCount * @planeCount.setter # <<<<<<<<<<<<<< * def planeCount(self, unsigned int planeCount): * self._pvt_ptr[0].planeCount = planeCount */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_10planeCount_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_planeCount); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_10planeCount_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_arg_planeCount) { unsigned int __pyx_v_planeCount; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno 
= 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); assert(__pyx_arg_planeCount); { __pyx_v_planeCount = __Pyx_PyLong_As_unsigned_int(__pyx_arg_planeCount); if (unlikely((__pyx_v_planeCount == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13628, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.planeCount.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_10planeCount_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self), ((unsigned int)__pyx_v_planeCount)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_10planeCount_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self, unsigned int __pyx_v_planeCount) { int __pyx_r; /* "cuda/bindings/runtime.pyx":13629 * @planeCount.setter * def planeCount(self, unsigned int planeCount): * self._pvt_ptr[0].planeCount = planeCount # <<<<<<<<<<<<<< * @property * def frameType(self): */ (__pyx_v_self->_pvt_ptr[0]).planeCount = __pyx_v_planeCount; /* "cuda/bindings/runtime.pyx":13627 * def planeCount(self): * return self._pvt_ptr[0].planeCount * @planeCount.setter # <<<<<<<<<<<<<< * def planeCount(self, unsigned int planeCount): * self._pvt_ptr[0].planeCount = planeCount */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13630 * def planeCount(self, unsigned int planeCount): * self._pvt_ptr[0].planeCount = planeCount * @property # <<<<<<<<<<<<<< * def frameType(self): * if self._pvt_ptr[0].frameType not in _dict_cudaEglFrameType: */ /* Python wrapper */ 
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9frameType_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9frameType_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_9frameType___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_9frameType___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13632 * @property * def frameType(self): * if self._pvt_ptr[0].frameType not in _dict_cudaEglFrameType: # <<<<<<<<<<<<<< * return None * return _dict_cudaEglFrameType[self._pvt_ptr[0].frameType] */ __pyx_t_1 = __Pyx_PyLong_From_enum____pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrameType_enum((__pyx_v_self->_pvt_ptr[0]).frameType); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13632, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaEglFrameType); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13632, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 13632, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* 
"cuda/bindings/runtime.pyx":13633 * def frameType(self): * if self._pvt_ptr[0].frameType not in _dict_cudaEglFrameType: * return None # <<<<<<<<<<<<<< * return _dict_cudaEglFrameType[self._pvt_ptr[0].frameType] * @frameType.setter */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13632 * @property * def frameType(self): * if self._pvt_ptr[0].frameType not in _dict_cudaEglFrameType: # <<<<<<<<<<<<<< * return None * return _dict_cudaEglFrameType[self._pvt_ptr[0].frameType] */ } /* "cuda/bindings/runtime.pyx":13634 * if self._pvt_ptr[0].frameType not in _dict_cudaEglFrameType: * return None * return _dict_cudaEglFrameType[self._pvt_ptr[0].frameType] # <<<<<<<<<<<<<< * @frameType.setter * def frameType(self, frameType not None : cudaEglFrameType): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaEglFrameType); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13634, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyLong_From_enum____pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrameType_enum((__pyx_v_self->_pvt_ptr[0]).frameType); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13634, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13634, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13630 * def planeCount(self, unsigned int planeCount): * self._pvt_ptr[0].planeCount = planeCount * @property # <<<<<<<<<<<<<< * def frameType(self): * if self._pvt_ptr[0].frameType not in _dict_cudaEglFrameType: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.frameType.__get__", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13635 * return None * return _dict_cudaEglFrameType[self._pvt_ptr[0].frameType] * @frameType.setter # <<<<<<<<<<<<<< * def frameType(self, frameType not None : cudaEglFrameType): * self._pvt_ptr[0].frameType = frameType.value */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9frameType_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_frameType); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_9frameType_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_frameType) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(((PyObject *)__pyx_v_frameType) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "frameType"); __PYX_ERR(0, 13636, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_9frameType_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self), ((PyObject *)__pyx_v_frameType)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_9frameType_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self, PyObject *__pyx_v_frameType) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrameType __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13637 * 
@frameType.setter * def frameType(self, frameType not None : cudaEglFrameType): * self._pvt_ptr[0].frameType = frameType.value # <<<<<<<<<<<<<< * @property * def eglColorFormat(self): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_frameType, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13637, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum __pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrameType_enum)__Pyx_PyLong_As_enum____pyx_t_4cuda_8bindings_9cyruntime_cudaEglFrameType_enum(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 13637, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (__pyx_v_self->_pvt_ptr[0]).frameType = __pyx_t_2; /* "cuda/bindings/runtime.pyx":13635 * return None * return _dict_cudaEglFrameType[self._pvt_ptr[0].frameType] * @frameType.setter # <<<<<<<<<<<<<< * def frameType(self, frameType not None : cudaEglFrameType): * self._pvt_ptr[0].frameType = frameType.value */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.frameType.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13638 * def frameType(self, frameType not None : cudaEglFrameType): * self._pvt_ptr[0].frameType = frameType.value * @property # <<<<<<<<<<<<<< * def eglColorFormat(self): * if self._pvt_ptr[0].eglColorFormat not in _dict_cudaEglColorFormat: */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_14eglColorFormat_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_14eglColorFormat_1__get__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, 
__pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_14eglColorFormat___get__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_14eglColorFormat___get__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "cuda/bindings/runtime.pyx":13640 * @property * def eglColorFormat(self): * if self._pvt_ptr[0].eglColorFormat not in _dict_cudaEglColorFormat: # <<<<<<<<<<<<<< * return None * return _dict_cudaEglColorFormat[self._pvt_ptr[0].eglColorFormat] */ __pyx_t_1 = __Pyx_PyLong_From_enum____pyx_t_4cuda_8bindings_9cyruntime_cudaEglColorFormat_enum((__pyx_v_self->_pvt_ptr[0]).eglColorFormat); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13640, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaEglColorFormat); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13640, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_t_2, Py_NE)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 13640, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (__pyx_t_3) { /* "cuda/bindings/runtime.pyx":13641 * def eglColorFormat(self): * if self._pvt_ptr[0].eglColorFormat not in _dict_cudaEglColorFormat: * return None # <<<<<<<<<<<<<< * return _dict_cudaEglColorFormat[self._pvt_ptr[0].eglColorFormat] * @eglColorFormat.setter */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13640 * @property * def 
eglColorFormat(self): * if self._pvt_ptr[0].eglColorFormat not in _dict_cudaEglColorFormat: # <<<<<<<<<<<<<< * return None * return _dict_cudaEglColorFormat[self._pvt_ptr[0].eglColorFormat] */ } /* "cuda/bindings/runtime.pyx":13642 * if self._pvt_ptr[0].eglColorFormat not in _dict_cudaEglColorFormat: * return None * return _dict_cudaEglColorFormat[self._pvt_ptr[0].eglColorFormat] # <<<<<<<<<<<<<< * @eglColorFormat.setter * def eglColorFormat(self, eglColorFormat not None : cudaEglColorFormat): */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaEglColorFormat); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13642, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyLong_From_enum____pyx_t_4cuda_8bindings_9cyruntime_cudaEglColorFormat_enum((__pyx_v_self->_pvt_ptr[0]).eglColorFormat); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13642, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13642, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13638 * def frameType(self, frameType not None : cudaEglFrameType): * self._pvt_ptr[0].frameType = frameType.value * @property # <<<<<<<<<<<<<< * def eglColorFormat(self): * if self._pvt_ptr[0].eglColorFormat not in _dict_cudaEglColorFormat: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.eglColorFormat.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13643 * return None * return _dict_cudaEglColorFormat[self._pvt_ptr[0].eglColorFormat] * @eglColorFormat.setter # <<<<<<<<<<<<<< * 
def eglColorFormat(self, eglColorFormat not None : cudaEglColorFormat): * self._pvt_ptr[0].eglColorFormat = eglColorFormat.value */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_14eglColorFormat_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_eglColorFormat); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_14eglColorFormat_3__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_eglColorFormat) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__set__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); if (unlikely(((PyObject *)__pyx_v_eglColorFormat) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "eglColorFormat"); __PYX_ERR(0, 13644, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_14eglColorFormat_2__set__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self), ((PyObject *)__pyx_v_eglColorFormat)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; goto __pyx_L5_cleaned_up; __pyx_L0:; __pyx_L5_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_14eglColorFormat_2__set__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self, PyObject *__pyx_v_eglColorFormat) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __pyx_t_4cuda_8bindings_9cyruntime_cudaEglColorFormat __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__set__", 0); /* "cuda/bindings/runtime.pyx":13645 * @eglColorFormat.setter * def eglColorFormat(self, eglColorFormat not None : cudaEglColorFormat): * self._pvt_ptr[0].eglColorFormat = eglColorFormat.value # <<<<<<<<<<<<<< * * cdef class 
cudaGraphConditionalHandle: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_eglColorFormat, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum __pyx_t_4cuda_8bindings_9cyruntime_cudaEglColorFormat_enum)__Pyx_PyLong_As_enum____pyx_t_4cuda_8bindings_9cyruntime_cudaEglColorFormat_enum(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 13645, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; (__pyx_v_self->_pvt_ptr[0]).eglColorFormat = __pyx_t_2; /* "cuda/bindings/runtime.pyx":13643 * return None * return _dict_cudaEglColorFormat[self._pvt_ptr[0].eglColorFormat] * @eglColorFormat.setter # <<<<<<<<<<<<<< * def eglColorFormat(self, eglColorFormat not None : cudaEglColorFormat): * self._pvt_ptr[0].eglColorFormat = eglColorFormat.value */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.eglColorFormat.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_15cudaEglFrame_st_10__reduce_cython__, "cudaEglFrame_st.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_15cudaEglFrame_st_11__reduce_cython__ = {"__reduce_cython__", 
(PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_15cudaEglFrame_st_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_15cudaEglFrame_st_12__setstate_cython__, "cudaEglFrame_st.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_15cudaEglFrame_st_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_15cudaEglFrame_st_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaEglFrame_st_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } 
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15cudaEglFrame_st_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaEglFrame_st *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaEglFrame_st.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13658 * * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned PY_LONG_LONG __pyx_v_init_value; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject 
*const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13658, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13658, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13658, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13658, __pyx_L3_error) } else { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13658, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13658, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v_init_value = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v_init_value == (unsigned PY_LONG_LONG)-1) && 
PyErr_Occurred())) __PYX_ERR(0, 13658, __pyx_L3_error) } else { __pyx_v_init_value = ((unsigned PY_LONG_LONG)0); } if (values[1]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13658, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13658, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphConditionalHandle.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *__pyx_v_self, unsigned PY_LONG_LONG __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13659 * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13660 * def __cinit__(self, unsigned long long 
init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":13659 * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13662 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((cudaGraphConditionalHandle *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13663 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13664 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13663 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13658 * * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13665 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":13667 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_5__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_5__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13668 * pass * def __repr__(self): * return '' # <<<<<<<<<<<<<< * def __int__(self): * return self._pvt_ptr[0] */ 
__Pyx_XDECREF(__pyx_r); __pyx_t_2 = ((PyObject *)__pyx_v_self); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_cudaGraphConditionalHandle, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13667 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphConditionalHandle.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13669 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_7__int__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_7__int__(PyObject 
*__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__int__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_6__int__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__int__", 0); /* "cuda/bindings/runtime.pyx":13670 * return '' * def __int__(self): * return self._pvt_ptr[0] # <<<<<<<<<<<<<< * def getPtr(self): * return self._pvt_ptr */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((unsigned PY_LONG_LONG)(__pyx_v_self->_pvt_ptr[0]))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13670, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13669 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphConditionalHandle.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13671 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* Python wrapper */ static PyObject 
/* cudaGraphConditionalHandle.getPtr(): Cython-generated METH_FASTCALL wrapper plus
 * implementation.  Returns the address stored in self._pvt_ptr, boxed as a Python
 * int (the pointer itself, not the pointed-to value — contrast with __int__). */
*__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_8getPtr, "cudaGraphConditionalHandle.getPtr(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_8getPtr};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("getPtr (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* getPtr(self) accepts no positional arguments and no keywords */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation: boxes the _pvt_ptr pointer (an address) into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("getPtr", 0);

  /* "cuda/bindings/runtime.pyx":13672
   *     def getPtr(self):
   *         return self._pvt_ptr             # <<<<<<<<<<<<<<
   */
  __Pyx_XDECREF(__pyx_r);
  /* cast pointer -> integer via the project's void_ptr typedef, then box it */
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr));
  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13672, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphConditionalHandle.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */

/* Python wrapper */
static PyObject
/* cudaGraphConditionalHandle.__reduce_cython__(): Cython-generated pickling stub.
 * Always raises TypeError — instances are not picklable because __cinit__ is
 * non-trivial (it wires _pvt_ptr to internal storage). */
*__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_10__reduce_cython__, "cudaGraphConditionalHandle.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_10__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* no positional arguments and no keywords are accepted */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation: unconditionally raises TypeError. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
   *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"             # <<<<<<<<<<<<<<
   */
  __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(2, 2, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphConditionalHandle.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */

/* Python wrapper */
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_12__setstate_cython__, "cudaGraphConditionalHandle.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphConditionalHandle.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_26cudaGraphConditionalHandle_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphConditionalHandle *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphConditionalHandle.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13685 * * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned PY_LONG_LONG __pyx_v_init_value; __pyx_t_4cuda_8bindings_7runtime_void_ptr 
__pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13685, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13685, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13685, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13685, __pyx_L3_error) } else { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13685, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13685, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v_init_value = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if 
(unlikely((__pyx_v_init_value == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13685, __pyx_L3_error) } else { __pyx_v_init_value = ((unsigned PY_LONG_LONG)0); } if (values[1]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13685, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13685, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaSurfaceObject_t.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *__pyx_v_self, unsigned PY_LONG_LONG __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13686 * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13687 * def 
__cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":13686 * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13689 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((cudaSurfaceObject_t *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13690 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13691 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13690 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13685 * * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13692 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; 
  /* Tail of the __dealloc__ wrapper (a no-op pass in the .pyx), then the
   * __repr__ wrapper and implementation for cudaSurfaceObject_t. */
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* __dealloc__ body is `pass` — nothing is freed here; _pvt_val lives inline in
 * the object and externally adopted _ptr addresses are owned by the caller. */
static void __pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *__pyx_v_self) {

  /* function exit code */
}

/* "cuda/bindings/runtime.pyx":13694
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return ''
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_5__repr__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __repr__ implementation: builds a unicode string by concatenating the
 * "cudaSurfaceObject_t" prefix literal, str(int(self)), and a suffix literal
 * (__pyx_kp_u_) — i.e. an f-string rendered from the .pyx source. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  size_t __pyx_t_3;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);

  /* "cuda/bindings/runtime.pyx":13695 */
  __Pyx_XDECREF(__pyx_r);
  /* call self.__int__() via the vectorcall fast path */
  __pyx_t_2 = ((PyObject *)__pyx_v_self);
  __Pyx_INCREF(__pyx_t_2);
  __pyx_t_3 = 0;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
    __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13695, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
  }
  /* str(<int result>) */
  __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1);
  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* prefix + str(...) */
  __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_cudaSurfaceObject_t, __pyx_t_2);
  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* ... + suffix */
  __pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_);
  if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13695, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaSurfaceObject_t.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13696
 *     def __int__(self):             # <<<<<<<<<<<<<<
 *         return self._pvt_ptr[0]
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_7__int__(PyObject *__pyx_v_self) {
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject
  /* Tail of the cudaSurfaceObject_t __int__ wrapper, its implementation, then the
   * getPtr and __reduce_cython__ wrappers/implementations.  Same generated shape
   * as the cudaGraphConditionalHandle versions earlier in this file. */
  *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* cudaSurfaceObject_t.__int__: boxes self._pvt_ptr[0] into a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_6__int__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__int__", 0);

  /* "cuda/bindings/runtime.pyx":13697
   *         return self._pvt_ptr[0]             # <<<<<<<<<<<<<<
   */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((unsigned PY_LONG_LONG)(__pyx_v_self->_pvt_ptr[0])));
  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13697, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaSurfaceObject_t.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13698
 *     def getPtr(self):             # <<<<<<<<<<<<<<
 *         return self._pvt_ptr
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_8getPtr, "cudaSurfaceObject_t.getPtr(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_8getPtr};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("getPtr (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  /* getPtr(self) accepts no positional arguments and no keywords */
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* getPtr implementation: boxes the _pvt_ptr address itself as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("getPtr", 0);

  /* "cuda/bindings/runtime.pyx":13699
   *         return self._pvt_ptr             # <<<<<<<<<<<<<<
   */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr));
  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaSurfaceObject_t.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_10__reduce_cython__, "cudaSurfaceObject_t.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_10__reduce_cython__};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
  const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
  if (unlikely(__pyx_kwds_len < 0)) return NULL;
  if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *)__pyx_v_self));

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __reduce_cython__ implementation: unconditionally raises TypeError (instances
 * are not picklable because __cinit__ is non-trivial). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);

  /* "(tree fragment)":2
   *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"             # <<<<<<<<<<<<<<
   */
  __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(2, 2, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaSurfaceObject_t.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "(tree fragment)":3
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */

/* Python wrapper */
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_12__setstate_cython__, "cudaSurfaceObject_t.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaSurfaceObject_t.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return 
__pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaSurfaceObject_t_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaSurfaceObject_t.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13712 * * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned PY_LONG_LONG __pyx_v_init_value; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED 
PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13712, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13712, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13712, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13712, __pyx_L3_error) } else { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13712, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13712, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v_init_value = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v_init_value == (unsigned PY_LONG_LONG)-1) && 
PyErr_Occurred())) __PYX_ERR(0, 13712, __pyx_L3_error) } else { __pyx_v_init_value = ((unsigned PY_LONG_LONG)0); } if (values[1]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13712, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13712, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureObject_t.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *__pyx_v_self, unsigned PY_LONG_LONG __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13713 * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13714 * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if 
_ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":13713 * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13716 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((cudaTextureObject_t *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13717 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13718 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13717 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13712 * * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13719 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); 
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":13721 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_5__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_5__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13722 * pass * def __repr__(self): * return '' # <<<<<<<<<<<<<< * def __int__(self): * return self._pvt_ptr[0] */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = ((PyObject *)__pyx_v_self); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject 
*__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13722, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13722, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_cudaTextureObject_t, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13722, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13722, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13721 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureObject_t.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13723 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_7__int__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_7__int__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__int__ 
(wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_6__int__(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__int__", 0); /* "cuda/bindings/runtime.pyx":13724 * return '' * def __int__(self): * return self._pvt_ptr[0] # <<<<<<<<<<<<<< * def getPtr(self): * return self._pvt_ptr */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((unsigned PY_LONG_LONG)(__pyx_v_self->_pvt_ptr[0]))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13724, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13723 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureObject_t.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13725 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_9getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ 
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_19cudaTextureObject_t_8getPtr, "cudaTextureObject_t.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaTextureObject_t_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_19cudaTextureObject_t_8getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_9getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":13726 * return self._pvt_ptr[0] * def getPtr(self): * return self._pvt_ptr # <<<<<<<<<<<<<< * * cdef class GLenum: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13726, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13725 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureObject_t.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL 
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_19cudaTextureObject_t_10__reduce_cython__, "cudaTextureObject_t.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaTextureObject_t_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_19cudaTextureObject_t_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureObject_t.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_19cudaTextureObject_t_12__setstate_cython__, "cudaTextureObject_t.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaTextureObject_t_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_19cudaTextureObject_t_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaTextureObject_t_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureObject_t.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return 
__pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_19cudaTextureObject_t_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaTextureObject_t.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13737 * * """ * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_6GLenum_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_6GLenum_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned int __pyx_v_init_value; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* 
values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13737, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13737, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13737, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13737, __pyx_L3_error) } else { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13737, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13737, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v_init_value = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_init_value == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13737, __pyx_L3_error) } else { 
__pyx_v_init_value = ((unsigned int)0); } if (values[1]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13737, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13737, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.GLenum.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLenum___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_6GLenum___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *__pyx_v_self, unsigned int __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13738 * """ * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13739 * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val); /* 
"cuda/bindings/runtime.pyx":13738 * """ * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13741 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((__pyx_t_4cuda_8bindings_9cyruntime_GLenum *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13742 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13743 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13742 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13737 * * """ * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13744 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_6GLenum_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_6GLenum_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_6GLenum_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLenum 
*)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_6GLenum_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":13746 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_5__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_5__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLenum_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6GLenum_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13747 * pass * def __repr__(self): * return '' # <<<<<<<<<<<<<< * def __int__(self): * return self._pvt_ptr[0] */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = ((PyObject *)__pyx_v_self); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13747, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); } __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13747, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_GLenum, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13747, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13747, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13746 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.GLenum.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13748 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_7__int__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_7__int__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__int__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLenum_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6GLenum_6__int__(struct 
__pyx_obj_4cuda_8bindings_7runtime_GLenum *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__int__", 0); /* "cuda/bindings/runtime.pyx":13749 * return '' * def __int__(self): * return self._pvt_ptr[0] # <<<<<<<<<<<<<< * def getPtr(self): * return self._pvt_ptr */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int(((unsigned int)(__pyx_v_self->_pvt_ptr[0]))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13749, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13748 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.GLenum.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13750 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_9getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6GLenum_8getPtr, "GLenum.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_6GLenum_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_6GLenum_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6GLenum_8getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_9getPtr(PyObject 
*__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLenum_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6GLenum_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":13751 * return self._pvt_ptr[0] * def getPtr(self): * return self._pvt_ptr # <<<<<<<<<<<<<< * * cdef class GLuint: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13751, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* 
"cuda/bindings/runtime.pyx":13750 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.GLenum.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6GLenum_10__reduce_cython__, "GLenum.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_6GLenum_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_6GLenum_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6GLenum_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if 
(unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLenum_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6GLenum_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.GLenum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to 
non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6GLenum_12__setstate_cython__, "GLenum.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_6GLenum_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_6GLenum_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6GLenum_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLenum_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.GLenum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLenum_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_6GLenum_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_GLenum *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.GLenum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13762 * * """ * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_6GLuint_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_6GLuint_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned int __pyx_v_init_value; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename 
= NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13762, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13762, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13762, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13762, __pyx_L3_error) } else { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13762, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13762, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v_init_value = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_init_value == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13762, __pyx_L3_error) } else { __pyx_v_init_value = ((unsigned int)0); } if (values[1]) { 
__pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13762, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13762, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.GLuint.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLuint___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_6GLuint___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *__pyx_v_self, unsigned int __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13763 * """ * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13764 * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":13763 * """ * def __cinit__(self, unsigned 
int init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13766 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((__pyx_t_4cuda_8bindings_9cyruntime_GLuint *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13767 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13768 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13767 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13762 * * """ * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13769 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_6GLuint_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_6GLuint_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_6GLuint_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); 
} static void __pyx_pf_4cuda_8bindings_7runtime_6GLuint_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":13771 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_5__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_5__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLuint_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6GLuint_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13772 * pass * def __repr__(self): * return '' # <<<<<<<<<<<<<< * def __int__(self): * return self._pvt_ptr[0] */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = ((PyObject *)__pyx_v_self); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13772, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 13772, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_GLuint, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13772, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13772, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13771 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.GLuint.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13773 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_7__int__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_7__int__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__int__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLuint_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6GLuint_6__int__(struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *__pyx_v_self) { PyObject *__pyx_r = 
NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__int__", 0); /* "cuda/bindings/runtime.pyx":13774 * return '' * def __int__(self): * return self._pvt_ptr[0] # <<<<<<<<<<<<<< * def getPtr(self): * return self._pvt_ptr */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int(((unsigned int)(__pyx_v_self->_pvt_ptr[0]))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13773 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.GLuint.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13775 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_9getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6GLuint_8getPtr, "GLuint.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_6GLuint_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_6GLuint_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6GLuint_8getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_9getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t 
__pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLuint_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6GLuint_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":13776 * return self._pvt_ptr[0] * def getPtr(self): * return self._pvt_ptr # <<<<<<<<<<<<<< * * cdef class EGLint: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13776, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13775 * def __int__(self): * return self._pvt_ptr[0] * def 
getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.GLuint.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6GLuint_10__reduce_cython__, "GLuint.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_6GLuint_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_6GLuint_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6GLuint_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLuint_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6GLuint_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.GLuint.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # 
<<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6GLuint_12__setstate_cython__, "GLuint.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_6GLuint_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_6GLuint_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6GLuint_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6GLuint_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.GLuint.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6GLuint_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_6GLuint_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_GLuint *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
   * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
   */
  __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(2, 4, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings.runtime.GLuint.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13787
 * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0):
 */

/* Python wrapper for EGLint.__cinit__: unpacks the optional `init_value`
 * and `_ptr` arguments (positional or keyword) and forwards to the pf impl. */
static int __pyx_pw_4cuda_8bindings_7runtime_6EGLint_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_6EGLint_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  unsigned int __pyx_v_init_value;
  __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`, which (with the
     * conventional `unlikely(x)` = `__builtin_expect(!!(x), 0)`) compares a
     * 0/1 value with 0 and can never fire, swallowing an error return from
     * __Pyx_NumKwargs_VARARGS. Parenthesization fixed to match the correct
     * form used by the sibling fastcall wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13787, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case 2:
        values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13787, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case 1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13787, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13787, __pyx_L3_error)
    } else {
      switch (__pyx_nargs) {
        case 2:
        values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13787, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case 1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13787, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    if (values[0]) {
      __pyx_v_init_value = __Pyx_PyLong_As_unsigned_int(values[0]);
      if (unlikely((__pyx_v_init_value == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 13787, __pyx_L3_error)
    } else {
      __pyx_v_init_value = ((unsigned int)0);
    }
    if (values[1]) {
__pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13787, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13787, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.EGLint.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6EGLint___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_6EGLint___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *__pyx_v_self, unsigned int __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13788 * """ * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13789 * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":13788 * """ * def __cinit__(self, unsigned 
int init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13791 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((__pyx_t_4cuda_8bindings_9cyruntime_EGLint *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13792 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13793 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13792 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13787 * * """ * def __cinit__(self, unsigned int init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13794 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_6EGLint_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_6EGLint_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_6EGLint_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); 
} static void __pyx_pf_4cuda_8bindings_7runtime_6EGLint_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":13796 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_5__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_5__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6EGLint_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6EGLint_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13797 * pass * def __repr__(self): * return '' # <<<<<<<<<<<<<< * def __int__(self): * return self._pvt_ptr[0] */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = ((PyObject *)__pyx_v_self); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 13797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_EGLint, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13796 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.EGLint.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13798 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_7__int__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_7__int__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__int__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6EGLint_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6EGLint_6__int__(struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *__pyx_v_self) { PyObject *__pyx_r = 
NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__int__", 0); /* "cuda/bindings/runtime.pyx":13799 * return '' * def __int__(self): * return self._pvt_ptr[0] # <<<<<<<<<<<<<< * def getPtr(self): * return self._pvt_ptr */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_int(((unsigned int)(__pyx_v_self->_pvt_ptr[0]))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13798 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.EGLint.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13800 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_9getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6EGLint_8getPtr, "EGLint.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_6EGLint_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_6EGLint_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6EGLint_8getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_9getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t 
__pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6EGLint_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6EGLint_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":13801 * return self._pvt_ptr[0] * def getPtr(self): * return self._pvt_ptr # <<<<<<<<<<<<<< * * cdef class VdpDevice: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13800 * def __int__(self): * return self._pvt_ptr[0] * def 
getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.EGLint.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6EGLint_10__reduce_cython__, "EGLint.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_6EGLint_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_6EGLint_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6EGLint_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6EGLint_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6EGLint_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.EGLint.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # 
<<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6EGLint_12__setstate_cython__, "EGLint.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_6EGLint_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_6EGLint_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6EGLint_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_6EGLint_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.EGLint.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6EGLint_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_6EGLint_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_EGLint *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);

  /* "(tree fragment)":4
   * raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
   */
  __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0);
  __PYX_ERR(2, 4, __pyx_L1_error)

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings.runtime.EGLint.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13812
 * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0):
 */

/* Python wrapper for VdpDevice.__cinit__: unpacks the optional `init_value`
 * and `_ptr` arguments (positional or keyword) and forwards to the pf impl. */
static int __pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  uint32_t __pyx_v_init_value;
  __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr;
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  #if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1;
  #endif
  __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0;
    /* BUGFIX: was `if (unlikely(__pyx_kwds_len) < 0)`, which (with the
     * conventional `unlikely(x)` = `__builtin_expect(!!(x), 0)`) compares a
     * 0/1 value with 0 and can never fire, swallowing an error return from
     * __Pyx_NumKwargs_VARARGS. Parenthesization fixed to match the correct
     * form used by the sibling fastcall wrappers in this file. */
    if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 13812, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case 2:
        values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13812, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case 1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13812, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13812, __pyx_L3_error)
    } else {
      switch (__pyx_nargs) {
        case 2:
        values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13812, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case 1:
        values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13812, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    if (values[0]) {
      __pyx_v_init_value = __Pyx_PyLong_As_uint32_t(values[0]);
      if (unlikely((__pyx_v_init_value == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13812, __pyx_L3_error)
    } else {
      __pyx_v_init_value = ((uint32_t)0);
    }
    if (values[1]) { __pyx_v__ptr =
__Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13812, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13812, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.VdpDevice.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_9VdpDevice___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_9VdpDevice___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *__pyx_v_self, uint32_t __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13813 * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13814 * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":13813 * """ * def __cinit__(self, uint32_t init_value = 
0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13816 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((__pyx_t_4cuda_8bindings_9cyruntime_VdpDevice *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13817 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13818 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13817 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13812 * * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13819 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } 
/* VdpDevice.__dealloc__ impl: no-op (the .pyx body is `pass`). */
static void __pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *__pyx_v_self) { /* function exit code */ }
/* "cuda/bindings/runtime.pyx":13821 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */
/* Python wrapper for VdpDevice.__repr__: delegates to the impl below. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_5__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* VdpDevice.__repr__ impl: calls self.__int__(), stringifies the result, and
 * concatenates it between the module-state string constants __pyx_kp_u_VdpDevice
 * and __pyx_kp_u_. NOTE(review): the quoted .pyx comment shows `return ''`, which
 * does not match the concatenation below — the angle-bracketed literal was likely
 * stripped from the comment during extraction; verify against runtime.pyx:13822. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13822 * pass * def __repr__(self): * return '' # <<<<<<<<<<<<<< * def __int__(self): * return self._pvt_ptr[0] */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = ((PyObject *)__pyx_v_self); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_VdpDevice, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13821 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.VdpDevice.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":13823 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */
/* Python wrapper for VdpDevice.__int__: delegates to the impl below. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_7__int__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__int__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* VdpDevice.__int__ impl: returns the uint32_t value behind _pvt_ptr as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_6__int__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__int__", 0); /* "cuda/bindings/runtime.pyx":13824 * return '' * def __int__(self): * return self._pvt_ptr[0] # <<<<<<<<<<<<<< * def getPtr(self): * return self._pvt_ptr */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_uint32_t(((uint32_t)(__pyx_v_self->_pvt_ptr[0]))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13823 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.VdpDevice.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":13825 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */
/* Declarations for VdpDevice.getPtr: prototype, docstring, and PyMethodDef entry
 * (METH_FASTCALL|METH_KEYWORDS calling convention). */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_9VdpDevice_8getPtr, "VdpDevice.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_9VdpDevice_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_9VdpDevice_8getPtr}; static PyObject 
/* Python wrapper for VdpDevice.getPtr (FASTCALL): rejects any positional or keyword
 * arguments, then delegates to the impl below. Note this path uses the correct
 * `unlikely(__pyx_kwds_len < 0)` error check. */
*__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* VdpDevice.getPtr impl: returns the _pvt_ptr address itself as a Python int
 * (cast through the void_ptr integer typedef). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":13826 * return self._pvt_ptr[0] * def getPtr(self): * return self._pvt_ptr # <<<<<<<<<<<<<< * * cdef class VdpGetProcAddress: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13825 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.VdpDevice.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */
/* Declarations + wrapper + impl for VdpDevice.__reduce_cython__: pickling stub that
 * unconditionally raises TypeError. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_9VdpDevice_10__reduce_cython__, "VdpDevice.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_9VdpDevice_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_9VdpDevice_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.VdpDevice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_9VdpDevice_12__setstate_cython__, "VdpDevice.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_9VdpDevice_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_9VdpDevice_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9VdpDevice_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.VdpDevice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t 
__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_9VdpDevice_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpDevice *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.VdpDevice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13837 * * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { unsigned PY_LONG_LONG 
__pyx_v_init_value; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13837, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13837, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13837, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13837, __pyx_L3_error) } else { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13837, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13837, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { 
__pyx_v_init_value = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[0]); if (unlikely((__pyx_v_init_value == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13837, __pyx_L3_error) } else { __pyx_v_init_value = ((unsigned PY_LONG_LONG)0); } if (values[1]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13837, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13837, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.VdpGetProcAddress.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *__pyx_v_self, unsigned PY_LONG_LONG __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13838 * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if 
(__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13839 * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = (&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":13838 * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13841 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((__pyx_t_4cuda_8bindings_9cyruntime_VdpGetProcAddress *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13842 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13843 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13842 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13837 * * """ * def __cinit__(self, unsigned long long init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13844 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void 
/* VdpGetProcAddress.__dealloc__ wrapper body (declaration begins in the previous
 * chunk range). NOTE(review): __pyx_args/__pyx_nargs are not parameters here; this
 * only compiles because __Pyx_KwValues_VARARGS is a macro that discards its
 * arguments — confirm against the generating Cython version before touching. */
__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); }
/* VdpGetProcAddress.__dealloc__ impl: no-op (the .pyx body is `pass`). */
static void __pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *__pyx_v_self) { /* function exit code */ }
/* "cuda/bindings/runtime.pyx":13846 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */
/* Python wrapper for VdpGetProcAddress.__repr__: delegates to the impl below. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_5__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* VdpGetProcAddress.__repr__ impl: calls self.__int__(), stringifies the result,
 * and concatenates it between the module-state constants __pyx_kp_u_VdpGetProcAddress
 * and __pyx_kp_u_. NOTE(review): the quoted .pyx comment shows `return ''`, which
 * does not match the concatenation — the angle-bracketed literal was likely stripped
 * from the comment during extraction; verify against runtime.pyx:13847. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13847 * pass * def __repr__(self): * return '' # <<<<<<<<<<<<<< * def __int__(self): * return self._pvt_ptr[0] */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = ((PyObject *)__pyx_v_self); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13847, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13847, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_VdpGetProcAddress, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13847, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13847, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13846 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.VdpGetProcAddress.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":13848 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */
/* Python wrapper for VdpGetProcAddress.__int__: delegates to the impl below. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_7__int__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__int__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* VdpGetProcAddress.__int__ impl: returns the unsigned long long behind _pvt_ptr
 * as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_6__int__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__int__", 0); /* "cuda/bindings/runtime.pyx":13849 * return '' * def __int__(self): * return self._pvt_ptr[0] # <<<<<<<<<<<<<< * def getPtr(self): * return self._pvt_ptr */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((unsigned PY_LONG_LONG)(__pyx_v_self->_pvt_ptr[0]))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13849, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13848 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.VdpGetProcAddress.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":13850 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */
/* Declarations + start of wrapper for VdpGetProcAddress.getPtr (the wrapper body
 * continues past the end of this chunk). */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_17VdpGetProcAddress_8getPtr, "VdpGetProcAddress.getPtr(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_17VdpGetProcAddress_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_17VdpGetProcAddress_8getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":13851 * return self._pvt_ptr[0] * def getPtr(self): * return self._pvt_ptr # <<<<<<<<<<<<<< * * cdef class VdpVideoSurface: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13851, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13850 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.VdpGetProcAddress.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL 
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_17VdpGetProcAddress_10__reduce_cython__, "VdpGetProcAddress.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_17VdpGetProcAddress_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_17VdpGetProcAddress_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.VdpGetProcAddress.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_17VdpGetProcAddress_12__setstate_cython__, "VdpGetProcAddress.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_17VdpGetProcAddress_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_17VdpGetProcAddress_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17VdpGetProcAddress_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.VdpGetProcAddress.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; 
/* NOTE(review): Machine-generated C (Cython 3.x output for cuda/bindings/runtime.pyx).
 * This region closes VdpGetProcAddress.__setstate_cython__ and implements the
 * Python wrapper + impl functions for the `VdpVideoSurface` extension class:
 * __cinit__ (init_value / _ptr arguments), __dealloc__, __repr__, __int__,
 * getPtr, __reduce_cython__ and the start of __setstate_cython__. Do NOT
 * hand-edit this file — change runtime.pyx and regenerate with Cython instead;
 * edits here will be overwritten on the next build. */
} static PyObject *__pyx_pf_4cuda_8bindings_7runtime_17VdpGetProcAddress_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpGetProcAddress *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.VdpGetProcAddress.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13862 * * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { uint32_t __pyx_v_init_value; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* 
values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13862, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13862, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13862, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13862, __pyx_L3_error) } else { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13862, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13862, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v_init_value = __Pyx_PyLong_As_uint32_t(values[0]); if (unlikely((__pyx_v_init_value == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13862, __pyx_L3_error) } else { 
__pyx_v_init_value = ((uint32_t)0); } if (values[1]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13862, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13862, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.VdpVideoSurface.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *__pyx_v_self, uint32_t __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13863 * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13864 * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = 
(&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":13863 * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13866 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((__pyx_t_4cuda_8bindings_9cyruntime_VdpVideoSurface *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13867 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13868 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13867 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13862 * * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13869 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); 
__pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *__pyx_v_self) { /* function exit code */ } /* "cuda/bindings/runtime.pyx":13871 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_5__repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_5__repr__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; size_t __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "cuda/bindings/runtime.pyx":13872 * pass * def __repr__(self): * return '' # <<<<<<<<<<<<<< * def __int__(self): * return self._pvt_ptr[0] */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = ((PyObject *)__pyx_v_self); __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, 
__pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13872, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13872, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_VdpVideoSurface, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13872, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13872, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13871 * def __dealloc__(self): * pass * def __repr__(self): # <<<<<<<<<<<<<< * return '' * def __int__(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("cuda.bindings.runtime.VdpVideoSurface.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13873 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_7__int__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_7__int__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__int__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_6__int__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__int__", 0); /* "cuda/bindings/runtime.pyx":13874 * return '' * def __int__(self): * return self._pvt_ptr[0] # <<<<<<<<<<<<<< * def getPtr(self): * return self._pvt_ptr */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_uint32_t(((uint32_t)(__pyx_v_self->_pvt_ptr[0]))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13874, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13873 * def __repr__(self): * return '' * def __int__(self): # <<<<<<<<<<<<<< * return self._pvt_ptr[0] * def getPtr(self): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.VdpVideoSurface.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13875 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_9getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_15VdpVideoSurface_8getPtr, "VdpVideoSurface.getPtr(self)"); static PyMethodDef 
__pyx_mdef_4cuda_8bindings_7runtime_15VdpVideoSurface_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_15VdpVideoSurface_8getPtr}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_9getPtr(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("getPtr (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("getPtr", 0); /* "cuda/bindings/runtime.pyx":13876 * return self._pvt_ptr[0] * def getPtr(self): * return self._pvt_ptr # <<<<<<<<<<<<<< * * cdef class VdpOutputSurface: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13876, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13875 * def __int__(self): * return self._pvt_ptr[0] * def getPtr(self): # <<<<<<<<<<<<<< * return self._pvt_ptr * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("cuda.bindings.runtime.VdpVideoSurface.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const 
*__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_15VdpVideoSurface_10__reduce_cython__, "VdpVideoSurface.__reduce_cython__(self)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_15VdpVideoSurface_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_15VdpVideoSurface_10__reduce_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_11__reduce_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; } const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) return NULL; if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;} __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.VdpVideoSurface.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* Python wrapper */ static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_15VdpVideoSurface_12__setstate_cython__, "VdpVideoSurface.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_15VdpVideoSurface_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_15VdpVideoSurface_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15VdpVideoSurface_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.VdpVideoSurface.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } 
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_15VdpVideoSurface_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpVideoSurface *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.VdpVideoSurface.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13887 * * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* Python wrapper */ static int __pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { uint32_t __pyx_v_init_value; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr; CYTHON_UNUSED Py_ssize_t __pyx_nargs; CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] 
= {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return -1; #endif __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_init_value,&__pyx_mstate_global->__pyx_n_u_ptr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_VARARGS(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13887, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13887, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13887, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__cinit__", 0) < (0)) __PYX_ERR(0, 13887, __pyx_L3_error) } else { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_VARARGS(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13887, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_VARARGS(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13887, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } } if (values[0]) { __pyx_v_init_value = __Pyx_PyLong_As_uint32_t(values[0]); if (unlikely((__pyx_v_init_value == ((uint32_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 13887, __pyx_L3_error) } else { 
__pyx_v_init_value = ((uint32_t)0); } if (values[1]) { __pyx_v__ptr = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[1]); if (unlikely((__pyx_v__ptr == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 13887, __pyx_L3_error) } else { __pyx_v__ptr = ((__pyx_t_4cuda_8bindings_7runtime_void_ptr)0); } } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 0, 2, __pyx_nargs); __PYX_ERR(0, 13887, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.VdpOutputSurface.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface___cinit__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *)__pyx_v_self), __pyx_v_init_value, __pyx_v__ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface___cinit__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *__pyx_v_self, uint32_t __pyx_v_init_value, __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v__ptr) { int __pyx_r; int __pyx_t_1; /* "cuda/bindings/runtime.pyx":13888 * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ __pyx_t_1 = (__pyx_v__ptr == 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13889 * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: * self._pvt_ptr = &self._pvt_val # <<<<<<<<<<<<<< * else: * self._pvt_ptr = _ptr */ __pyx_v_self->_pvt_ptr = 
(&__pyx_v_self->_pvt_val); /* "cuda/bindings/runtime.pyx":13888 * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): * if _ptr == 0: # <<<<<<<<<<<<<< * self._pvt_ptr = &self._pvt_val * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":13891 * self._pvt_ptr = &self._pvt_val * else: * self._pvt_ptr = _ptr # <<<<<<<<<<<<<< * if init_value: * self._pvt_ptr[0] = init_value */ /*else*/ { __pyx_v_self->_pvt_ptr = ((__pyx_t_4cuda_8bindings_9cyruntime_VdpOutputSurface *)__pyx_v__ptr); } __pyx_L3:; /* "cuda/bindings/runtime.pyx":13892 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ __pyx_t_1 = (__pyx_v_init_value != 0); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":13893 * self._pvt_ptr = _ptr * if init_value: * self._pvt_ptr[0] = init_value # <<<<<<<<<<<<<< * def __dealloc__(self): * pass */ (__pyx_v_self->_pvt_ptr[0]) = __pyx_v_init_value; /* "cuda/bindings/runtime.pyx":13892 * else: * self._pvt_ptr = _ptr * if init_value: # <<<<<<<<<<<<<< * self._pvt_ptr[0] = init_value * def __dealloc__(self): */ } /* "cuda/bindings/runtime.pyx":13887 * * """ * def __cinit__(self, uint32_t init_value = 0, void_ptr _ptr = 0): # <<<<<<<<<<<<<< * if _ptr == 0: * self._pvt_ptr = &self._pvt_val */ /* function exit code */ __pyx_r = 0; return __pyx_r; } /* "cuda/bindings/runtime.pyx":13894 * if init_value: * self._pvt_ptr[0] = init_value * def __dealloc__(self): # <<<<<<<<<<<<<< * pass * def __repr__(self): */ /* Python wrapper */ static void __pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_3__dealloc__(PyObject *__pyx_v_self) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); 
__pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_2__dealloc__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}

/* VdpOutputSurface.__dealloc__ implementation: intentionally empty ("pass" in
 * the .pyx source). The object never frees the memory _pvt_ptr points to. */
static void __pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_2__dealloc__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *__pyx_v_self) {
/* function exit code */
}

/* "cuda/bindings/runtime.pyx":13896
 * def __dealloc__(self):
 *     pass
 * def __repr__(self):             # <<<<<<<<<<<<<<
 *     return ''
 * def __int__(self):
 */

/* Python wrapper for __repr__ (tp_repr slot: takes only self). */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_5__repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_5__repr__(PyObject *__pyx_v_self) {
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
/* NOTE(review): __pyx_args/__pyx_nargs are not declared in this wrapper;
 * compiles only because __Pyx_KwValues_VARARGS is a macro that ignores its
 * arguments — confirm in the generated utility code. */
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_4__repr__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* __repr__ implementation: calls self.__int__(), converts the result with
 * str(), then concatenates the interned "VdpOutputSurface" prefix constant
 * and a trailing interned suffix constant. Returns a new unicode object, or
 * NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_4__repr__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
size_t __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "cuda/bindings/runtime.pyx":13897
 * pass
 * def __repr__(self):
 *     return ''             # <<<<<<<<<<<<<<
 * def __int__(self):
 *     return self._pvt_ptr[0]
 */
__Pyx_XDECREF(__pyx_r);
/* self.__int__() via a vectorcall method lookup on the interned name "__int__". */
__pyx_t_2 = ((PyObject *)__pyx_v_self);
__Pyx_INCREF(__pyx_t_2);
__pyx_t_3 = 0;
{
PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
__pyx_t_1 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_int, __pyx_callargs+__pyx_t_3, (1-__pyx_t_3) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13897, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
}
/* str(self.__int__()) */
__pyx_t_2 = __Pyx_PyObject_Unicode(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13897, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* prepend the interned "VdpOutputSurface" prefix constant */
__pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_VdpOutputSurface, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13897, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* append the interned suffix constant (in-place concat reuses the buffer when possible) */
__pyx_t_2 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13897, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":13896
 * def __dealloc__(self):
 *     pass
 * def __repr__(self):             # <<<<<<<<<<<<<<
 *     return ''
 * def __int__(self):
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("cuda.bindings.runtime.VdpOutputSurface.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13898
 * def __repr__(self):
 *     return ''
 * def __int__(self):             # <<<<<<<<<<<<<<
 *     return self._pvt_ptr[0]
 * def getPtr(self):
 */

/* Python wrapper for __int__ (nb_int slot: takes only self). */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_7__int__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_7__int__(PyObject *__pyx_v_self) {
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__int__ (wrapper)", 0);
/* NOTE(review): same macro-only use of undeclared __pyx_args/__pyx_nargs as
 * in the __repr__ wrapper above. */
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_6__int__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* __int__ implementation: returns the uint32_t value currently stored at
 * _pvt_ptr[0] as a Python int. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_6__int__(struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__int__", 0);
/* "cuda/bindings/runtime.pyx":13899
 * return ''
 * def __int__(self):
 *     return self._pvt_ptr[0]             # <<<<<<<<<<<<<<
 * def getPtr(self):
 *     return self._pvt_ptr
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyLong_From_uint32_t(((uint32_t)(__pyx_v_self->_pvt_ptr[0]))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13899, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":13898
 * def __repr__(self):
 *     return ''
 * def __int__(self):             # <<<<<<<<<<<<<<
 *     return self._pvt_ptr[0]
 * def getPtr(self):
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("cuda.bindings.runtime.VdpOutputSurface.__int__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":13900
 * def __int__(self):
 *     return self._pvt_ptr[0]
 * def getPtr(self):             # <<<<<<<<<<<<<<
 *     return self._pvt_ptr
 *
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_16VdpOutputSurface_8getPtr, "VdpOutputSurface.getPtr(self)");
static
PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_16VdpOutputSurface_9getPtr = {"getPtr", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_9getPtr, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_16VdpOutputSurface_8getPtr};
/* Python wrapper for VdpOutputSurface.getPtr(): rejects all positional and
 * keyword arguments, then delegates to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_9getPtr(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("getPtr (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* getPtr accepts no arguments beyond self. */
if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("getPtr", 1, 0, 0, __pyx_nargs); return NULL; }
const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ?
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len < 0)) return NULL;
if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("getPtr", __pyx_kwds); return NULL;}
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_8getPtr(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* getPtr implementation: returns the raw address held in _pvt_ptr as a
 * Python integer (a void_ptr, i.e. unsigned long long). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_8getPtr(struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("getPtr", 0);
/* "cuda/bindings/runtime.pyx":13901
 * return self._pvt_ptr[0]
 * def getPtr(self):
 *     return self._pvt_ptr             # <<<<<<<<<<<<<<
 *
 * @cython.embedsignature(True)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_v_self->_pvt_ptr)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13901, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":13900
 * def __int__(self):
 *     return self._pvt_ptr[0]
 * def getPtr(self):             # <<<<<<<<<<<<<<
 *     return self._pvt_ptr
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("cuda.bindings.runtime.VdpOutputSurface.getPtr", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject
*const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_16VdpOutputSurface_10__reduce_cython__, "VdpOutputSurface.__reduce_cython__(self)");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_16VdpOutputSurface_11__reduce_cython__ = {"__reduce_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_11__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_16VdpOutputSurface_10__reduce_cython__};
/* Python wrapper for VdpOutputSurface.__reduce_cython__.
 * Rejects any positional or keyword arguments, then delegates to the
 * __pyx_pf_ implementation below. Returns NULL with an exception set on error. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_11__reduce_cython__(PyObject *__pyx_v_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* This method accepts no arguments beyond self. */
if (unlikely(__pyx_nargs > 0)) { __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL; }
const Py_ssize_t __pyx_kwds_len = unlikely(__pyx_kwds) ?
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len < 0)) return NULL;
if (unlikely(__pyx_kwds_len > 0)) {__Pyx_RejectKeywords("__reduce_cython__", __pyx_kwds); return NULL;}
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_10__reduce_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* Implementation of VdpOutputSurface.__reduce_cython__.
 * Always raises TypeError: the type has a non-trivial __cinit__, so the
 * default pickle protocol cannot reconstruct instances. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"             # <<<<<<<<<<<<<<
 * def __setstate_cython__(self, __pyx_state):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */
__Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 2, __pyx_L1_error)
/* "(tree fragment)":1
 * def __reduce_cython__(self):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cuda.bindings.runtime.VdpOutputSurface.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}

/* "(tree fragment)":3
 * def __reduce_cython__(self):
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 * def __setstate_cython__(self, __pyx_state):             # <<<<<<<<<<<<<<
 *     raise TypeError, "no default __reduce__ due to non-trivial __cinit__"
 */

/* Python wrapper */
static PyObject
*__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_16VdpOutputSurface_12__setstate_cython__, "VdpOutputSurface.__setstate_cython__(self, __pyx_state)"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_16VdpOutputSurface_13__setstate_cython__ = {"__setstate_cython__", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_13__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_16VdpOutputSurface_12__setstate_cython__}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_16VdpOutputSurface_13__setstate_cython__(PyObject *__pyx_v_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pyx_state,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(2, 3, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "__setstate_cython__", 0) < (0)) __PYX_ERR(2, 3, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, i); __PYX_ERR(2, 3, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(2, 3, __pyx_L3_error) } __pyx_v___pyx_state = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(2, 3, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.VdpOutputSurface.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_12__setstate_cython__(((struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *)__pyx_v_self), __pyx_v___pyx_state); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } 
/* VdpOutputSurface.__setstate_cython__: generated impl that always raises TypeError ("no default __reduce__ due to non-trivial __cinit__") -- default pickling is deliberately unsupported. The cudaDeviceReset wrapper + impl follow. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_16VdpOutputSurface_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_4cuda_8bindings_7runtime_VdpOutputSurface *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< */ __Pyx_Raise(__pyx_builtin_TypeError, __pyx_mstate_global->__pyx_kp_u_no_default___reduce___due_to_non, 0, 0); __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" */ /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.VdpOutputSurface.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13903 * return self._pvt_ptr * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceReset(): * """ Destroy all allocations and reset all state on the current device in the current process. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_1cudaDeviceReset(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_cudaDeviceReset, "cudaDeviceReset()\n\nDestroy all allocations and reset all state on the current device in the current process.\n\nExplicitly destroys and cleans up all resources associated with the\ncurrent device in the current process. 
It is the caller's\nresponsibility to ensure that the resources are not accessed or passed\nin subsequent API calls and doing so will result in undefined behavior.\nThese resources include CUDA types :py:obj:`~.cudaStream_t`,\n:py:obj:`~.cudaEvent_t`, :py:obj:`~.cudaArray_t`,\n:py:obj:`~.cudaMipmappedArray_t`, :py:obj:`~.cudaPitchedPtr`,\n:py:obj:`~.cudaTextureObject_t`, :py:obj:`~.cudaSurfaceObject_t`,\n:py:obj:`~.textureReference`, :py:obj:`~.surfaceReference`,\n:py:obj:`~.cudaExternalMemory_t`, :py:obj:`~.cudaExternalSemaphore_t`\nand :py:obj:`~.cudaGraphicsResource_t`. These resources also include\nmemory allocations by :py:obj:`~.cudaMalloc`,\n:py:obj:`~.cudaMallocHost`, :py:obj:`~.cudaMallocManaged` and\n:py:obj:`~.cudaMallocPitch`. Any subsequent API call to this device\nwill reinitialize the device.\n\nNote that this function will reset the device immediately. It is the\ncaller's responsibility to ensure that the device is not being accessed\nby any other host threads from the process when this function is\ncalled.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceSynchronize`\n\nNotes\n-----\n:py:obj:`~.cudaDeviceReset()` will not destroy memory allocations by :py:obj:`~.cudaMallocAsync()` and :py:obj:`~.cudaMallocFromPoolAsync()`. 
These memory allocations need to be destroyed explicitly.\n\nIf a non-primary :py:obj:`~.CUcontext` is current to the thread, :py:obj:`~.cudaDeviceReset()` will destroy only the internal CUDA RT state for that :py:obj:`~.CUcontext`."); /* method table entry: plain METH_NOARGS function exposed as "cudaDeviceReset" */ static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_1cudaDeviceReset = {"cudaDeviceReset", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_1cudaDeviceReset, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_cudaDeviceReset}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_1cudaDeviceReset(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceReset (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this METH_NOARGS wrapper; __Pyx_KwValues_VARARGS presumably does not evaluate its arguments -- confirm against the macro definition */ __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_cudaDeviceReset(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_cudaDeviceReset(CYTHON_UNUSED PyObject *__pyx_self) { cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceReset", 0); /* "cuda/bindings/runtime.pyx":13943 * If a non-primary :py:obj:`~.CUcontext` is current to the thread, :py:obj:`~.cudaDeviceReset()` will destroy only the internal CUDA RT state for that :py:obj:`~.CUcontext`. 
* """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceReset() * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /* GIL released (Py_UNBLOCK_THREADS) for the duration of the runtime call */ /*try:*/ { /* "cuda/bindings/runtime.pyx":13944 * """ * with nogil: * err = cyruntime.cudaDeviceReset() # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ /* a result of cudaErrorCallRequiresNewerDriver doubles as the possible-exception marker: the PyErr check (taken with the GIL) decides whether to propagate */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceReset(); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 13944, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":13943 * If a non-primary :py:obj:`~.CUcontext` is current to the thread, :py:obj:`~.cudaDeviceReset()` will destroy only the internal CUDA RT state for that :py:obj:`~.CUcontext`. * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceReset() * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":13945 * with nogil: * err = cyruntime.cudaDeviceReset() * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* pack the looked-up _dict_cudaError_t[err] object into a 1-tuple result */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if 
(__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 13945, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13903 * return self._pvt_ptr * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceReset(): * """ Destroy all allocations and reset all state on the current device in the current process. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceReset", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13947 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSynchronize(): * """ Wait for compute device to finish. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_3cudaDeviceSynchronize(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_2cudaDeviceSynchronize, "cudaDeviceSynchronize()\n\nWait for compute device to finish.\n\nBlocks until the device has completed all preceding requested tasks.\n:py:obj:`~.cudaDeviceSynchronize()` returns an error if one of the\npreceding tasks has failed. 
If the\n:py:obj:`~.cudaDeviceScheduleBlockingSync` flag was set for this\ndevice, the host thread will block until the device has finished its\nwork.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceReset`, :py:obj:`~.cuCtxSynchronize`"); /* method table entry: plain METH_NOARGS function exposed as "cudaDeviceSynchronize" */ static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_3cudaDeviceSynchronize = {"cudaDeviceSynchronize", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_3cudaDeviceSynchronize, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_2cudaDeviceSynchronize}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_3cudaDeviceSynchronize(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceSynchronize (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); /* NOTE(review): __pyx_args/__pyx_nargs are not declared in this METH_NOARGS wrapper; __Pyx_KwValues_VARARGS presumably does not evaluate its arguments -- confirm against the macro definition */ __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_2cudaDeviceSynchronize(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_2cudaDeviceSynchronize(CYTHON_UNUSED PyObject *__pyx_self) { cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceSynchronize", 0); /* "cuda/bindings/runtime.pyx":13967 * :py:obj:`~.cudaDeviceReset`, :py:obj:`~.cuCtxSynchronize` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSynchronize() * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /* GIL released (Py_UNBLOCK_THREADS) for the duration of the runtime call */ /*try:*/ { /* "cuda/bindings/runtime.pyx":13968 * """ * with nogil: * err = cyruntime.cudaDeviceSynchronize() # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_1 = 
__pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceSynchronize(); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 13968, __pyx_L4_error) __pyx_v_err = __pyx_t_1; /* cudaErrorCallRequiresNewerDriver is also the possible-exception marker; __Pyx_ErrOccurredWithGIL() confirms a real Python error */ } /* "cuda/bindings/runtime.pyx":13967 * :py:obj:`~.cudaDeviceReset`, :py:obj:`~.cuCtxSynchronize` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSynchronize() * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":13969 * with nogil: * err = cyruntime.cudaDeviceSynchronize() * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 13969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* pack _dict_cudaError_t[err] into the returned 1-tuple */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 13969, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13947 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSynchronize(): * """ Wait for compute device to finish. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceSynchronize", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":13971 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSetLimit(limit not None : cudaLimit, size_t value): * """ Set resource limits. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_5cudaDeviceSetLimit(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_4cudaDeviceSetLimit, "cudaDeviceSetLimit(limit: cudaLimit, size_t value)\n\nSet resource limits.\n\nSetting `limit` to `value` is a request by the application to update\nthe current limit maintained by the device. The driver is free to\nmodify the requested value to meet h/w requirements (this could be\nclamping to minimum or maximum values, rounding up to nearest element\nsize, etc). 
The application can use :py:obj:`~.cudaDeviceGetLimit()` to\nfind out exactly what the limit has been set to.\n\nSetting each :py:obj:`~.cudaLimit` has its own specific restrictions,\nso each is discussed here.\n\n- :py:obj:`~.cudaLimitStackSize` controls the stack size in bytes of\n each GPU thread.\n\n- :py:obj:`~.cudaLimitPrintfFifoSize` controls the size in bytes of the\n shared FIFO used by the :py:obj:`~.printf()` device system call.\n Setting :py:obj:`~.cudaLimitPrintfFifoSize` must not be performed\n after launching any kernel that uses the :py:obj:`~.printf()` device\n system call - in such case :py:obj:`~.cudaErrorInvalidValue` will be\n returned.\n\n- :py:obj:`~.cudaLimitMallocHeapSize` controls the size in bytes of the\n heap used by the :py:obj:`~.malloc()` and :py:obj:`~.free()` device\n system calls. Setting :py:obj:`~.cudaLimitMallocHeapSize` must not be\n performed after launching any kernel that uses the\n :py:obj:`~.malloc()` or :py:obj:`~.free()` device system calls - in\n such case :py:obj:`~.cudaErrorInvalidValue` will be returned.\n\n- :py:obj:`~.cudaLimitDevRuntimeSyncDepth` controls the maximum nesting\n depth of a grid at which a thread can safely call\n :py:obj:`~.cudaDeviceSynchronize()`. Setting this limit must be\n performed before any launch of a kernel that uses the device runtime\n and calls :py:obj:`~.cudaDeviceSynchronize()` above the default sync\n depth, two levels of grids. Calls to\n :py:obj:`~.cudaDeviceSynchronize()` will fail with error code\n :py:obj:`~.cudaErrorSyncDepthExceeded` if the limitation is violated.\n This limit can be set smaller tha""n the default or up the maximum\n launch depth of 24. When setting this limit, keep in mind that\n additional levels of sync depth require the runtime to reserve large\n amounts of device memory which can no longer be used for user\n allocations. 
If these reservations of device memory fail,\n :py:obj:`~.cudaDeviceSetLimit` will return\n :py:obj:`~.cudaErrorMemoryAllocation`, and the limit can be reset to\n a lower value. This limit is only applicable to devices of compute\n capability < 9.0. Attempting to set this limit on devices of other\n compute capability will results in error\n :py:obj:`~.cudaErrorUnsupportedLimit` being returned.\n\n- :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount` controls the\n maximum number of outstanding device runtime launches that can be\n made from the current device. A grid is outstanding from the point of\n launch up until the grid is known to have been completed. Device\n runtime launches which violate this limitation fail and return\n :py:obj:`~.cudaErrorLaunchPendingCountExceeded` when\n :py:obj:`~.cudaGetLastError()` is called after launch. If more\n pending launches than the default (2048 launches) are needed for a\n module using the device runtime, this limit can be increased. Keep in\n mind that being able to sustain additional pending launches will\n require the runtime to reserve larger amounts of device memory\n upfront which can no longer be used for allocations. If these\n reservations fail, :py:obj:`~.cudaDeviceSetLimit` will return\n :py:obj:`~.cudaErrorMemoryAllocation`, and the limit can be reset to\n a lower value. This limit is only applicable to devices of compute\n capability 3.5 and higher. Attempting to set this limit on devices of\n compute capability less than 3.5 will result in the error\n :py:obj:`~.cudaErrorUnsupportedLimit` being returned.\n\n- :py:obj:`~.cudaLimitMaxL2FetchGranularity` controls the L2 cache\n fetch granularity. Values can range from 0B to 128B. This is purel""y a\n performance hint and it can be ignored or clamped depending on the\n platform.\n\n- :py:obj:`~.cudaLimitPersistingL2CacheSize` controls size in bytes\n available for persisting L2 cache. 
This is purely a performance hint\n and it can be ignored or clamped depending on the platform.\n\nParameters\n----------\nlimit : :py:obj:`~.cudaLimit`\n Limit to set\nvalue : size_t\n Size of limit\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorUnsupportedLimit`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceGetLimit`, :py:obj:`~.cuCtxSetLimit`"); /* method table entry: __Pyx_METH_FASTCALL|METH_KEYWORDS, parses (limit, value) */ static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_5cudaDeviceSetLimit = {"cudaDeviceSetLimit", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_5cudaDeviceSetLimit, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_4cudaDeviceSetLimit}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_5cudaDeviceSetLimit(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_limit = 0; size_t __pyx_v_value; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceSetLimit (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_limit,&__pyx_mstate_global->__pyx_n_u_value,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 13971, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { /* stage positional args into values[]; cases fall through from highest index */ case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13971, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13971, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceSetLimit", 0) < (0)) __PYX_ERR(0, 13971, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceSetLimit", 1, 2, 2, i); __PYX_ERR(0, 13971, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 13971, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 13971, __pyx_L3_error) } __pyx_v_limit = values[0]; __pyx_v_value = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_value == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 13972, __pyx_L3_error) } /* conversions done: limit kept as PyObject*, value narrowed to size_t ((size_t)-1 plus a pending exception signals failure) */ goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceSetLimit", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 13971, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceSetLimit", __pyx_clineno, __pyx_lineno, __pyx_filename); 
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; /* enforce the 'limit not None' signature annotation before dispatching */ if (unlikely(((PyObject *)__pyx_v_limit) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "limit"); __PYX_ERR(0, 13972, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_4cudaDeviceSetLimit(__pyx_self, __pyx_v_limit, __pyx_v_value); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_4cudaDeviceSetLimit(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_limit, size_t __pyx_v_value) { enum cudaLimit __pyx_v_cylimit; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaLimit __pyx_t_2; cudaError_t __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceSetLimit", 0); /* "cuda/bindings/runtime.pyx":14066 * :py:obj:`~.cudaDeviceGetLimit`, :py:obj:`~.cuCtxSetLimit` * """ * cdef cyruntime.cudaLimit cylimit = limit.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceSetLimit(cylimit, value) */ /* read limit.value and convert it to the C enum cudaLimit */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_limit, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaLimit)__Pyx_PyLong_As_enum__cudaLimit(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 14066, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cylimit = __pyx_t_2; /* 
"cuda/bindings/runtime.pyx":14067 * """ * cdef cyruntime.cudaLimit cylimit = limit.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSetLimit(cylimit, value) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /* GIL released (Py_UNBLOCK_THREADS) for the duration of the runtime call */ /*try:*/ { /* "cuda/bindings/runtime.pyx":14068 * cdef cyruntime.cudaLimit cylimit = limit.value * with nogil: * err = cyruntime.cudaDeviceSetLimit(cylimit, value) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceSetLimit(__pyx_v_cylimit, __pyx_v_value); if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14068, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":14067 * """ * cdef cyruntime.cudaLimit cylimit = limit.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSetLimit(cylimit, value) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14069 * with nogil: * err = cyruntime.cudaDeviceSetLimit(cylimit, value) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* pack _dict_cudaError_t[err] into the returned 1-tuple */ __pyx_t_4 = PyTuple_New(1); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 14069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 14069, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":13971 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSetLimit(limit not None : cudaLimit, size_t value): * """ Set resource limits. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceSetLimit", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14071 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetLimit(limit not None : cudaLimit): * """ Return resource limits. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_7cudaDeviceGetLimit(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_6cudaDeviceGetLimit, "cudaDeviceGetLimit(limit: cudaLimit)\n\nReturn resource limits.\n\nReturns in `*pValue` the current size of `limit`. 
The following\n:py:obj:`~.cudaLimit` values are supported.\n\n- :py:obj:`~.cudaLimitStackSize` is the stack size in bytes of each GPU\n thread.\n\n- :py:obj:`~.cudaLimitPrintfFifoSize` is the size in bytes of the\n shared FIFO used by the :py:obj:`~.printf()` device system call.\n\n- :py:obj:`~.cudaLimitMallocHeapSize` is the size in bytes of the heap\n used by the :py:obj:`~.malloc()` and :py:obj:`~.free()` device system\n calls.\n\n- :py:obj:`~.cudaLimitDevRuntimeSyncDepth` is the maximum grid depth at\n which a thread can isssue the device runtime call\n :py:obj:`~.cudaDeviceSynchronize()` to wait on child grid launches to\n complete. This functionality is removed for devices of compute\n capability >= 9.0, and hence will return error\n :py:obj:`~.cudaErrorUnsupportedLimit` on such devices.\n\n- :py:obj:`~.cudaLimitDevRuntimePendingLaunchCount` is the maximum\n number of outstanding device runtime launches.\n\n- :py:obj:`~.cudaLimitMaxL2FetchGranularity` is the L2 cache fetch\n granularity.\n\n- :py:obj:`~.cudaLimitPersistingL2CacheSize` is the persisting L2 cache\n size in bytes.\n\nParameters\n----------\nlimit : :py:obj:`~.cudaLimit`\n Limit to query\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorUnsupportedLimit`, :py:obj:`~.cudaErrorInvalidValue`\npValue : int\n Returned size of the limit\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceSetLimit`, :py:obj:`~.cuCtxGetLimit`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_7cudaDeviceGetLimit = {"cudaDeviceGetLimit", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_7cudaDeviceGetLimit, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_6cudaDeviceGetLimit}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_7cudaDeviceGetLimit(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) 
{ PyObject *__pyx_v_limit = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetLimit (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_limit,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14071, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14071, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceGetLimit", 0) < (0)) __PYX_ERR(0, 14071, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceGetLimit", 1, 1, 1, i); __PYX_ERR(0, 14071, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14071, __pyx_L3_error) } __pyx_v_limit = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceGetLimit", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14071, __pyx_L3_error) __pyx_L6_skip:; goto 
/* NOTE(review): Cython-generated C from cuda/bindings/runtime.pyx -- do not hand-edit; regenerate from the .pyx source instead. This span holds the cleanup and error tail of the cudaDeviceGetLimit Python wrapper (releases parsed-argument references, records a traceback, rejects limit=None with a TypeError), followed by the implementation function: it reads limit.value, converts it to the cudaLimit enum, calls cyruntime.cudaDeviceGetLimit(&pValue, cylimit) with the GIL released, and returns the tuple (_dict_cudaError_t[err], pValue), or (_dict_cudaError_t[err], None) when err != cudaSuccess. */ __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetLimit", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_limit) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "limit"); __PYX_ERR(0, 14072, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_6cudaDeviceGetLimit(__pyx_self, __pyx_v_limit); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_6cudaDeviceGetLimit(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_limit) { size_t __pyx_v_pValue; enum cudaLimit __pyx_v_cylimit; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaLimit __pyx_t_2; cudaError_t __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetLimit", 0); /* "cuda/bindings/runtime.pyx":14120 * :py:obj:`~.cudaDeviceSetLimit`, :py:obj:`~.cuCtxGetLimit` * """ * cdef size_t pValue = 0 # <<<<<<<<<<<<<< * cdef cyruntime.cudaLimit cylimit = limit.value * with nogil: */ __pyx_v_pValue = 0; /* "cuda/bindings/runtime.pyx":14121 * """ * cdef size_t pValue = 0 * cdef cyruntime.cudaLimit cylimit = 
limit.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceGetLimit(&pValue, cylimit) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_limit, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14121, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaLimit)__Pyx_PyLong_As_enum__cudaLimit(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 14121, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cylimit = __pyx_t_2; /* "cuda/bindings/runtime.pyx":14122 * cdef size_t pValue = 0 * cdef cyruntime.cudaLimit cylimit = limit.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetLimit(&pValue, cylimit) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14123 * cdef cyruntime.cudaLimit cylimit = limit.value * with nogil: * err = cyruntime.cudaDeviceGetLimit(&pValue, cylimit) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetLimit((&__pyx_v_pValue), __pyx_v_cylimit); if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14123, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":14122 * cdef size_t pValue = 0 * cdef cyruntime.cudaLimit cylimit = limit.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetLimit(&pValue, cylimit) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14124 * with nogil: * err = cyruntime.cudaDeviceGetLimit(&pValue, cylimit) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], 
pValue) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":14125 * err = cyruntime.cudaDeviceGetLimit(&pValue, cylimit) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pValue) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14125, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 14125, __pyx_L1_error); __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14124 * with nogil: * err = cyruntime.cudaDeviceGetLimit(&pValue, cylimit) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pValue) */ } /* "cuda/bindings/runtime.pyx":14126 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pValue) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = 
__Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_pValue); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 14126, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 14126, __pyx_L1_error); __pyx_t_1 = 0; __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14071 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetLimit(limit not None : cudaLimit): * """ Return resource limits. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetLimit", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14128 * return (_dict_cudaError_t[err], pValue) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetTexture1DLinearMaxWidth(fmtDesc : Optional[cudaChannelFormatDesc], int device): * """ Returns the maximum number of elements allocatable in a 1D linear texture for a given element size. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9cudaDeviceGetTexture1DLinearMaxWidth(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_8cudaDeviceGetTexture1DLinearMaxWidth, "cudaDeviceGetTexture1DLinearMaxWidth(cudaChannelFormatDesc fmtDesc: Optional[cudaChannelFormatDesc], int device)\n\nReturns the maximum number of elements allocatable in a 1D linear texture for a given element size.\n\nReturns in `maxWidthInElements` the maximum number of elements\nallocatable in a 1D linear texture for given format descriptor\n`fmtDesc`.\n\nParameters\n----------\nfmtDesc : :py:obj:`~.cudaChannelFormatDesc`\n Texture format description.\nNone : int\n None\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorUnsupportedLimit`, :py:obj:`~.cudaErrorInvalidValue`\nmaxWidthInElements : int\n Returns maximum number of texture elements allocatable for given\n `fmtDesc`.\n\nSee Also\n--------\n:py:obj:`~.cuDeviceGetTexture1DLinearMaxWidth`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_9cudaDeviceGetTexture1DLinearMaxWidth = {"cudaDeviceGetTexture1DLinearMaxWidth", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_9cudaDeviceGetTexture1DLinearMaxWidth, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_8cudaDeviceGetTexture1DLinearMaxWidth}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_9cudaDeviceGetTexture1DLinearMaxWidth(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_fmtDesc = 0; int __pyx_v_device; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t
/* NOTE(review): Cython-generated METH_FASTCALL wrapper for cudaDeviceGetTexture1DLinearMaxWidth(fmtDesc, device) -- do not hand-edit; regenerate from runtime.pyx. It unpacks the two positional/keyword arguments, converts device via __Pyx_PyLong_As_int, type-checks fmtDesc as cudaChannelFormatDesc (the ArgTypeTest flag also accepts None), then dispatches to the implementation function. */ __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetTexture1DLinearMaxWidth (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_fmtDesc,&__pyx_mstate_global->__pyx_n_u_device_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14128, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14128, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14128, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceGetTexture1DLinearMaxWidth", 0) < (0)) __PYX_ERR(0, 14128, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceGetTexture1DLinearMaxWidth", 1, 2, 2, i); __PYX_ERR(0, 14128, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14128, __pyx_L3_error) values[1] = 
__Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14128, __pyx_L3_error) } __pyx_v_fmtDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *)values[0]); __pyx_v_device = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14129, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceGetTexture1DLinearMaxWidth", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 14128, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetTexture1DLinearMaxWidth", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_fmtDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc, 1, "fmtDesc", 0))) __PYX_ERR(0, 14129, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_8cudaDeviceGetTexture1DLinearMaxWidth(__pyx_self, __pyx_v_fmtDesc, __pyx_v_device); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_8cudaDeviceGetTexture1DLinearMaxWidth(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_fmtDesc, int __pyx_v_device) { size_t 
/* NOTE(review): Cython-generated implementation of cudaDeviceGetTexture1DLinearMaxWidth -- do not hand-edit; regenerate from runtime.pyx. It takes fmtDesc->_pvt_ptr as the cudaChannelFormatDesc pointer (NULL when fmtDesc is None), calls the cyruntime entry point with the GIL released, and returns (_dict_cudaError_t[err], maxWidthInElements), or (_dict_cudaError_t[err], None) when err != cudaSuccess. */ __pyx_v_maxWidthInElements; struct cudaChannelFormatDesc *__pyx_v_cyfmtDesc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations struct cudaChannelFormatDesc *__pyx_t_1; int __pyx_t_2; cudaError_t __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetTexture1DLinearMaxWidth", 0); /* "cuda/bindings/runtime.pyx":14155 * :py:obj:`~.cuDeviceGetTexture1DLinearMaxWidth` * """ * cdef size_t maxWidthInElements = 0 # <<<<<<<<<<<<<< * cdef cyruntime.cudaChannelFormatDesc* cyfmtDesc_ptr = fmtDesc._pvt_ptr if fmtDesc is not None else NULL * with nogil: */ __pyx_v_maxWidthInElements = 0; /* "cuda/bindings/runtime.pyx":14156 * """ * cdef size_t maxWidthInElements = 0 * cdef cyruntime.cudaChannelFormatDesc* cyfmtDesc_ptr = fmtDesc._pvt_ptr if fmtDesc is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cyfmtDesc_ptr, device) */ __pyx_t_2 = (((PyObject *)__pyx_v_fmtDesc) != Py_None); if (__pyx_t_2) { __pyx_t_1 = __pyx_v_fmtDesc->_pvt_ptr; } else { __pyx_t_1 = NULL; } __pyx_v_cyfmtDesc_ptr = __pyx_t_1; /* "cuda/bindings/runtime.pyx":14157 * cdef size_t maxWidthInElements = 0 * cdef cyruntime.cudaChannelFormatDesc* cyfmtDesc_ptr = fmtDesc._pvt_ptr if fmtDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cyfmtDesc_ptr, device) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14158 * cdef cyruntime.cudaChannelFormatDesc* cyfmtDesc_ptr = fmtDesc._pvt_ptr if fmtDesc is not None else NULL * with nogil: * err = cyruntime.cudaDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cyfmtDesc_ptr, device) # <<<<<<<<<<<<<< * if err != 
cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetTexture1DLinearMaxWidth((&__pyx_v_maxWidthInElements), __pyx_v_cyfmtDesc_ptr, __pyx_v_device); if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14158, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":14157 * cdef size_t maxWidthInElements = 0 * cdef cyruntime.cudaChannelFormatDesc* cyfmtDesc_ptr = fmtDesc._pvt_ptr if fmtDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cyfmtDesc_ptr, device) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14159 * with nogil: * err = cyruntime.cudaDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cyfmtDesc_ptr, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], maxWidthInElements) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":14160 * err = cyruntime.cudaDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cyfmtDesc_ptr, device) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], maxWidthInElements) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_6)) 
__PYX_ERR(0, 14160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14160, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14160, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 14160, __pyx_L1_error); __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14159 * with nogil: * err = cyruntime.cudaDeviceGetTexture1DLinearMaxWidth(&maxWidthInElements, cyfmtDesc_ptr, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], maxWidthInElements) */ } /* "cuda/bindings/runtime.pyx":14161 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], maxWidthInElements) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyLong_FromSize_t(__pyx_v_maxWidthInElements); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14161, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); if 
(__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 14161, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 14161, __pyx_L1_error); __pyx_t_4 = 0; __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14128 * return (_dict_cudaError_t[err], pValue) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetTexture1DLinearMaxWidth(fmtDesc : Optional[cudaChannelFormatDesc], int device): * """ Returns the maximum number of elements allocatable in a 1D linear texture for a given element size. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetTexture1DLinearMaxWidth", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14163 * return (_dict_cudaError_t[err], maxWidthInElements) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetCacheConfig(): * """ Returns the preferred cache configuration for the current device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_11cudaDeviceGetCacheConfig(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_10cudaDeviceGetCacheConfig, "cudaDeviceGetCacheConfig()\n\nReturns the preferred cache configuration for the current device.\n\nOn devices where the L1 cache and shared memory use the same hardware\nresources, this returns through `pCacheConfig` the preferred cache\nconfiguration for the current device. This is only a preference. 
The\nruntime will use the requested configuration if possible, but it is\nfree to choose a different configuration if required to execute\nfunctions.\n\nThis will return a `pCacheConfig` of\n:py:obj:`~.cudaFuncCachePreferNone` on devices where the size of the L1\ncache and shared memory are fixed.\n\nThe supported cache configurations are:\n\n- :py:obj:`~.cudaFuncCachePreferNone`: no preference for shared memory\n or L1 (default)\n\n- :py:obj:`~.cudaFuncCachePreferShared`: prefer larger shared memory\n and smaller L1 cache\n\n- :py:obj:`~.cudaFuncCachePreferL1`: prefer larger L1 cache and smaller\n shared memory\n\n- :py:obj:`~.cudaFuncCachePreferEqual`: prefer equal size L1 cache and\n shared memory\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`\npCacheConfig : :py:obj:`~.cudaFuncCache`\n Returned cache configuration\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cudaFuncSetCacheConfig (C API)`, cudaFuncSetCacheConfig (C++ API), :py:obj:`~.cuCtxGetCacheConfig`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_11cudaDeviceGetCacheConfig = {"cudaDeviceGetCacheConfig", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_11cudaDeviceGetCacheConfig, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_10cudaDeviceGetCacheConfig}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_11cudaDeviceGetCacheConfig(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetCacheConfig (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_10cudaDeviceGetCacheConfig(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_10cudaDeviceGetCacheConfig(CYTHON_UNUSED PyObject *__pyx_self) { enum cudaFuncCache __pyx_v_pCacheConfig; cudaError_t __pyx_v_err; PyObject 
/* NOTE(review): Cython-generated METH_NOARGS wrapper and implementation of cudaDeviceGetCacheConfig -- do not hand-edit; regenerate from runtime.pyx. The implementation queries the preferred cache configuration with the GIL released and returns (_dict_cudaError_t[err], cudaFuncCache(pCacheConfig)), rewrapping the raw enum by calling the module-level cudaFuncCache type, or (_dict_cudaError_t[err], None) when err != cudaSuccess. */ *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; size_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetCacheConfig", 0); /* "cuda/bindings/runtime.pyx":14204 * """ * cdef cyruntime.cudaFuncCache pCacheConfig * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetCacheConfig(&pCacheConfig) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14205 * cdef cyruntime.cudaFuncCache pCacheConfig * with nogil: * err = cyruntime.cudaDeviceGetCacheConfig(&pCacheConfig) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetCacheConfig((&__pyx_v_pCacheConfig)); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14205, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":14204 * """ * cdef cyruntime.cudaFuncCache pCacheConfig * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetCacheConfig(&pCacheConfig) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14206 * with nogil: * err = cyruntime.cudaDeviceGetCacheConfig(&pCacheConfig) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaFuncCache(pCacheConfig)) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":14207 * err = 
cyruntime.cudaDeviceGetCacheConfig(&pCacheConfig) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], cudaFuncCache(pCacheConfig)) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 14207, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 14207, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14206 * with nogil: * err = cyruntime.cudaDeviceGetCacheConfig(&pCacheConfig) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaFuncCache(pCacheConfig)) */ } /* "cuda/bindings/runtime.pyx":14208 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaFuncCache(pCacheConfig)) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 14208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_4 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_cudaFuncCache); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyLong_From_enum__cudaFuncCache(__pyx_v_pCacheConfig); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_6); assert(__pyx_t_4); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_6, __pyx__function); __pyx_t_8 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_7}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14208, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 14208, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14163 * return (_dict_cudaError_t[err], maxWidthInElements) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def 
cudaDeviceGetCacheConfig(): * """ Returns the preferred cache configuration for the current device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetCacheConfig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14210 * return (_dict_cudaError_t[err], cudaFuncCache(pCacheConfig)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetStreamPriorityRange(): * """ Returns numerical values that correspond to the least and greatest stream priorities. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13cudaDeviceGetStreamPriorityRange(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_12cudaDeviceGetStreamPriorityRange, "cudaDeviceGetStreamPriorityRange()\n\nReturns numerical values that correspond to the least and greatest stream priorities.\n\nReturns in `*leastPriority` and `*greatestPriority` the numerical\nvalues that correspond to the least and greatest stream priorities\nrespectively. Stream priorities follow a convention where lower numbers\nimply greater priorities. The range of meaningful stream priorities is\ngiven by [`*greatestPriority`, `*leastPriority`]. If the user attempts\nto create a stream with a priority value that is outside the the\nmeaningful range as specified by this API, the priority is\nautomatically clamped down or up to either `*leastPriority` or\n`*greatestPriority` respectively. See\n:py:obj:`~.cudaStreamCreateWithPriority` for details on creating a\npriority stream. 
A NULL may be passed in for `*leastPriority` or\n`*greatestPriority` if the value is not desired.\n\nThis function will return '0' in both `*leastPriority` and\n`*greatestPriority` if the current context's device does not support\nstream priorities (see :py:obj:`~.cudaDeviceGetAttribute`).\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`\nleastPriority : int\n Pointer to an int in which the numerical value for least stream\n priority is returned\ngreatestPriority : int\n Pointer to an int in which the numerical value for greatest stream\n priority is returned\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cuCtxGetStreamPriorityRange`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_13cudaDeviceGetStreamPriorityRange = {"cudaDeviceGetStreamPriorityRange", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_13cudaDeviceGetStreamPriorityRange, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_12cudaDeviceGetStreamPriorityRange}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_13cudaDeviceGetStreamPriorityRange(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetStreamPriorityRange (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_12cudaDeviceGetStreamPriorityRange(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_12cudaDeviceGetStreamPriorityRange(CYTHON_UNUSED PyObject *__pyx_self) { int __pyx_v_leastPriority; int __pyx_v_greatestPriority; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; /* NOTE(review): above is the generated METH_NOARGS wrapper for cudaDeviceGetStreamPriorityRange plus the start of its implementation, which continues beyond this chunk. Cython-generated code -- do not hand-edit; regenerate from runtime.pyx. */ 
const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetStreamPriorityRange", 0); /* "cuda/bindings/runtime.pyx":14246 * :py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cuCtxGetStreamPriorityRange` * """ * cdef int leastPriority = 0 # <<<<<<<<<<<<<< * cdef int greatestPriority = 0 * with nogil: */ __pyx_v_leastPriority = 0; /* "cuda/bindings/runtime.pyx":14247 * """ * cdef int leastPriority = 0 * cdef int greatestPriority = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority) */ __pyx_v_greatestPriority = 0; /* "cuda/bindings/runtime.pyx":14248 * cdef int leastPriority = 0 * cdef int greatestPriority = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14249 * cdef int greatestPriority = 0 * with nogil: * err = cyruntime.cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetStreamPriorityRange((&__pyx_v_leastPriority), (&__pyx_v_greatestPriority)); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14249, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":14248 * cdef int leastPriority = 0 * cdef int greatestPriority = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto 
__pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14250 * with nogil: * err = cyruntime.cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None) * return (_dict_cudaError_t[err], leastPriority, greatestPriority) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":14251 * err = cyruntime.cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], leastPriority, greatestPriority) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 14251, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 14251, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, Py_None) != (0)) __PYX_ERR(0, 14251, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14250 * with nogil: * err = cyruntime.cudaDeviceGetStreamPriorityRange(&leastPriority, &greatestPriority) * if err != 
cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None) * return (_dict_cudaError_t[err], leastPriority, greatestPriority) */ } /* "cuda/bindings/runtime.pyx":14252 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None) * return (_dict_cudaError_t[err], leastPriority, greatestPriority) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_int(__pyx_v_leastPriority); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_int(__pyx_v_greatestPriority); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14252, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 14252, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_4) != (0)) __PYX_ERR(0, 14252, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14210 * return (_dict_cudaError_t[err], cudaFuncCache(pCacheConfig)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def 
cudaDeviceGetStreamPriorityRange(): * """ Returns numerical values that correspond to the least and greatest stream priorities. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetStreamPriorityRange", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14254 * return (_dict_cudaError_t[err], leastPriority, greatestPriority) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSetCacheConfig(cacheConfig not None : cudaFuncCache): * """ Sets the preferred cache configuration for the current device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaDeviceSetCacheConfig(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_14cudaDeviceSetCacheConfig, "cudaDeviceSetCacheConfig(cacheConfig: cudaFuncCache)\n\nSets the preferred cache configuration for the current device.\n\nOn devices where the L1 cache and shared memory use the same hardware\nresources, this sets through `cacheConfig` the preferred cache\nconfiguration for the current device. This is only a preference. The\nruntime will use the requested configuration if possible, but it is\nfree to choose a different configuration if required to execute the\nfunction. Any function preference set via\n:py:obj:`~.cudaFuncSetCacheConfig (C API)` or cudaFuncSetCacheConfig\n(C++ API) will be preferred over this device-wide setting. 
Setting the\ndevice-wide cache configuration to :py:obj:`~.cudaFuncCachePreferNone`\nwill cause subsequent kernel launches to prefer to not change the cache\nconfiguration unless required to launch the kernel.\n\nThis setting does nothing on devices where the size of the L1 cache and\nshared memory are fixed.\n\nLaunching a kernel with a different preference than the most recent\npreference setting may insert a device-side synchronization point.\n\nThe supported cache configurations are:\n\n- :py:obj:`~.cudaFuncCachePreferNone`: no preference for shared memory\n or L1 (default)\n\n- :py:obj:`~.cudaFuncCachePreferShared`: prefer larger shared memory\n and smaller L1 cache\n\n- :py:obj:`~.cudaFuncCachePreferL1`: prefer larger L1 cache and smaller\n shared memory\n\n- :py:obj:`~.cudaFuncCachePreferEqual`: prefer equal size L1 cache and\n shared memory\n\nParameters\n----------\ncacheConfig : :py:obj:`~.cudaFuncCache`\n Requested cache configuration\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaFuncSetCacheConfig (C API)`, cudaFuncSetCacheConfig (C++ API), :py:obj:`~.cuCtxSetCacheConfig`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_15cudaDeviceSetCacheConfig = {"cudaDeviceSetCacheConfig", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_15cudaDeviceSetCacheConfig, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_14cudaDeviceSetCacheConfig}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_15cudaDeviceSetCacheConfig(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_cacheConfig = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; 
const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceSetCacheConfig (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_cacheConfig,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14254, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14254, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceSetCacheConfig", 0) < (0)) __PYX_ERR(0, 14254, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceSetCacheConfig", 1, 1, 1, i); __PYX_ERR(0, 14254, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14254, __pyx_L3_error) } __pyx_v_cacheConfig = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceSetCacheConfig", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14254, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { 
Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceSetCacheConfig", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_cacheConfig) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "cacheConfig"); __PYX_ERR(0, 14255, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_14cudaDeviceSetCacheConfig(__pyx_self, __pyx_v_cacheConfig); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_14cudaDeviceSetCacheConfig(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_cacheConfig) { enum cudaFuncCache __pyx_v_cycacheConfig; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaFuncCache __pyx_t_2; cudaError_t __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceSetCacheConfig", 0); /* "cuda/bindings/runtime.pyx":14304 * :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaFuncSetCacheConfig (C API)`, cudaFuncSetCacheConfig (C++ API), :py:obj:`~.cuCtxSetCacheConfig` * """ * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceSetCacheConfig(cycacheConfig) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_cacheConfig, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 14304, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaFuncCache)__Pyx_PyLong_As_enum__cudaFuncCache(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 14304, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cycacheConfig = __pyx_t_2; /* "cuda/bindings/runtime.pyx":14305 * """ * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSetCacheConfig(cycacheConfig) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14306 * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value * with nogil: * err = cyruntime.cudaDeviceSetCacheConfig(cycacheConfig) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceSetCacheConfig(__pyx_v_cycacheConfig); if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14306, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":14305 * """ * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSetCacheConfig(cycacheConfig) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14307 * with nogil: * err = cyruntime.cudaDeviceSetCacheConfig(cycacheConfig) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 14307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14307, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 14307, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14254 * return (_dict_cudaError_t[err], leastPriority, greatestPriority) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSetCacheConfig(cacheConfig not None : cudaFuncCache): * """ Sets the preferred cache configuration for the current device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceSetCacheConfig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14309 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetByPCIBusId(char* pciBusId): * """ Returns a handle to a compute device. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17cudaDeviceGetByPCIBusId(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_16cudaDeviceGetByPCIBusId, "cudaDeviceGetByPCIBusId(char *pciBusId)\n\nReturns a handle to a compute device.\n\nReturns in `*device` a device ordinal given a PCI bus ID string.\n\nwhere `domain`, `bus`, `device`, and `function` are all hexadecimal\nvalues\n\nParameters\n----------\npciBusId : bytes\n String in one of the following forms:\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`\ndevice : int\n Returned device ordinal\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceGetPCIBusId`, :py:obj:`~.cuDeviceGetByPCIBusId`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_17cudaDeviceGetByPCIBusId = {"cudaDeviceGetByPCIBusId", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_17cudaDeviceGetByPCIBusId, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_16cudaDeviceGetByPCIBusId}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_17cudaDeviceGetByPCIBusId(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { char *__pyx_v_pciBusId; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetByPCIBusId (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = 
PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pciBusId,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14309, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14309, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceGetByPCIBusId", 0) < (0)) __PYX_ERR(0, 14309, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceGetByPCIBusId", 1, 1, 1, i); __PYX_ERR(0, 14309, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14309, __pyx_L3_error) } __pyx_v_pciBusId = __Pyx_PyObject_AsWritableString(values[0]); if (unlikely((!__pyx_v_pciBusId) && PyErr_Occurred())) __PYX_ERR(0, 14310, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceGetByPCIBusId", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14309, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetByPCIBusId", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_16cudaDeviceGetByPCIBusId(__pyx_self, __pyx_v_pciBusId); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_16cudaDeviceGetByPCIBusId(CYTHON_UNUSED PyObject *__pyx_self, char *__pyx_v_pciBusId) { int __pyx_v_device; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetByPCIBusId", 0); /* "cuda/bindings/runtime.pyx":14334 * :py:obj:`~.cudaDeviceGetPCIBusId`, :py:obj:`~.cuDeviceGetByPCIBusId` * """ * cdef int device = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceGetByPCIBusId(&device, pciBusId) */ __pyx_v_device = 0; /* "cuda/bindings/runtime.pyx":14335 * """ * cdef int device = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetByPCIBusId(&device, pciBusId) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14336 * cdef int device = 0 * with nogil: * err = cyruntime.cudaDeviceGetByPCIBusId(&device, pciBusId) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetByPCIBusId((&__pyx_v_device), __pyx_v_pciBusId); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14336, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":14335 * """ 
* cdef int device = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetByPCIBusId(&device, pciBusId) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14337 * with nogil: * err = cyruntime.cudaDeviceGetByPCIBusId(&device, pciBusId) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":14338 * err = cyruntime.cudaDeviceGetByPCIBusId(&device, pciBusId) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], device) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 14338, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 14338, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14337 * with nogil: * err = 
cyruntime.cudaDeviceGetByPCIBusId(&device, pciBusId) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) */ } /* "cuda/bindings/runtime.pyx":14339 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_int(__pyx_v_device); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14339, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 14339, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14309 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetByPCIBusId(char* pciBusId): * """ Returns a handle to a compute device. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetByPCIBusId", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14341 * return (_dict_cudaError_t[err], device) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetPCIBusId(int length, int device): * """ Returns a PCI Bus Id string for the device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaDeviceGetPCIBusId(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_18cudaDeviceGetPCIBusId, "cudaDeviceGetPCIBusId(int length, int device)\n\nReturns a PCI Bus Id string for the device.\n\nReturns an ASCII string identifying the device `dev` in the NULL-\nterminated string pointed to by `pciBusId`. `length` specifies the\nmaximum length of the string that may be returned.\n\nwhere `domain`, `bus`, `device`, and `function` are all hexadecimal\nvalues. 
pciBusId should be large enough to store 13 characters\nincluding the NULL-terminator.\n\nParameters\n----------\nlength : int\n Maximum length of string to store in `name`\ndevice : int\n Device to get identifier string for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`\npciBusId : bytes\n Returned identifier string for the device in the following format\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceGetByPCIBusId`, :py:obj:`~.cuDeviceGetPCIBusId`"); 
/* NOTE(review): Cython-generated C for cuda/bindings/runtime.pyx — do not edit by hand; change the .pyx and regenerate. */
/* Method-table entry binding the Python name "cudaDeviceGetPCIBusId" to its METH_FASTCALL|METH_KEYWORDS wrapper, with the docstring above. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_19cudaDeviceGetPCIBusId = {"cudaDeviceGetPCIBusId", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_19cudaDeviceGetPCIBusId, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_18cudaDeviceGetPCIBusId}; 
/* Fastcall wrapper: unpacks the (length, device) positional/keyword arguments, converts both to C int via __Pyx_PyLong_As_int, and dispatches to the __pyx_pf_ implementation; on any unpacking error it DECREFs collected values, records a traceback, and returns NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_19cudaDeviceGetPCIBusId(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_length; int __pyx_v_device; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetPCIBusId (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_length,&__pyx_mstate_global->__pyx_n_u_device_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14341, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14341, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14341, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceGetPCIBusId", 0) < (0)) __PYX_ERR(0, 14341, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceGetPCIBusId", 1, 2, 2, i); __PYX_ERR(0, 14341, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14341, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14341, __pyx_L3_error) } __pyx_v_length = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_length == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14342, __pyx_L3_error) __pyx_v_device = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14342, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceGetPCIBusId", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 14341, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
__Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetPCIBusId", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_18cudaDeviceGetPCIBusId(__pyx_self, __pyx_v_length, __pyx_v_device); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } 
/* Implementation: builds a writable bytes object of `length` spaces, obtains its char* buffer, releases the GIL around the cyruntime.cudaDeviceGetPCIBusId call, then returns the 2-tuple (_dict_cudaError_t[err], pypciBusId) — or (_dict_cudaError_t[err], None) when err != cudaSuccess. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_18cudaDeviceGetPCIBusId(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_length, int __pyx_v_device) { PyObject *__pyx_v_pypciBusId = NULL; char *__pyx_v_pciBusId; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; char *__pyx_t_2; cudaError_t __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetPCIBusId", 0); /* "cuda/bindings/runtime.pyx":14371 * :py:obj:`~.cudaDeviceGetByPCIBusId`, :py:obj:`~.cuDeviceGetPCIBusId` * """ * pypciBusId = b" " * length # <<<<<<<<<<<<<< * cdef char* pciBusId = pypciBusId * with nogil: */ __pyx_t_1 = __Pyx_PySequence_Multiply(__pyx_mstate_global->__pyx_kp_b__5, __pyx_v_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14371, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_pypciBusId = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":14372 * """ * pypciBusId = b" " * length * cdef char* pciBusId = pypciBusId # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceGetPCIBusId(pciBusId, length, device) */ __pyx_t_2 = __Pyx_PyBytes_AsWritableString(__pyx_v_pypciBusId); if (unlikely((!__pyx_t_2) && PyErr_Occurred())) __PYX_ERR(0, 14372, __pyx_L1_error) __pyx_v_pciBusId = __pyx_t_2; /* "cuda/bindings/runtime.pyx":14373 * pypciBusId = b" " * length * cdef char* 
pciBusId = pypciBusId * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetPCIBusId(pciBusId, length, device) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14374 * cdef char* pciBusId = pypciBusId * with nogil: * err = cyruntime.cudaDeviceGetPCIBusId(pciBusId, length, device) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetPCIBusId(__pyx_v_pciBusId, __pyx_v_length, __pyx_v_device); if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14374, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":14373 * pypciBusId = b" " * length * cdef char* pciBusId = pypciBusId * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetPCIBusId(pciBusId, length, device) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14375 * with nogil: * err = cyruntime.cudaDeviceGetPCIBusId(pciBusId, length, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pypciBusId) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":14376 * err = cyruntime.cudaDeviceGetPCIBusId(pciBusId, length, device) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pypciBusId) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = 
__Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14376, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14376, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 14376, __pyx_L1_error); __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14375 * with nogil: * err = cyruntime.cudaDeviceGetPCIBusId(pciBusId, length, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pypciBusId) */ } /* "cuda/bindings/runtime.pyx":14377 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pypciBusId) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14377, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14377, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14377, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14377, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_1); if 
(__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 14377, __pyx_L1_error); __Pyx_INCREF(__pyx_v_pypciBusId); __Pyx_GIVEREF(__pyx_v_pypciBusId); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_pypciBusId) != (0)) __PYX_ERR(0, 14377, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14341 * return (_dict_cudaError_t[err], device) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetPCIBusId(int length, int device): * """ Returns a PCI Bus Id string for the device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetPCIBusId", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pypciBusId); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14379 * return (_dict_cudaError_t[err], pypciBusId) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcGetEventHandle(event): * """ Gets an interprocess handle for a previously allocated event. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_21cudaIpcGetEventHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_20cudaIpcGetEventHandle, "cudaIpcGetEventHandle(event)\n\nGets an interprocess handle for a previously allocated event.\n\nTakes as input a previously allocated event. This event must have been\ncreated with the :py:obj:`~.cudaEventInterprocess` and\n:py:obj:`~.cudaEventDisableTiming` flags set. 
This opaque handle may be\ncopied into other processes and opened with\n:py:obj:`~.cudaIpcOpenEventHandle` to allow efficient hardware\nsynchronization between GPU work in different processes.\n\nAfter the event has been been opened in the importing process,\n:py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventSynchronize`,\n:py:obj:`~.cudaStreamWaitEvent` and :py:obj:`~.cudaEventQuery` may be\nused in either process. Performing operations on the imported event\nafter the exported event has been freed with\n:py:obj:`~.cudaEventDestroy` will result in undefined behavior.\n\nIPC functionality is restricted to devices with support for unified\naddressing on Linux and Windows operating systems. IPC functionality on\nWindows is supported for compatibility purposes but not recommended as\nit comes with performance cost. Users can test their device for IPC\nfunctionality by calling :py:obj:`~.cudaDeviceGetAttribute` with\n:py:obj:`~.cudaDevAttrIpcEventSupport`\n\nParameters\n----------\nevent : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Event allocated with :py:obj:`~.cudaEventInterprocess` and\n :py:obj:`~.cudaEventDisableTiming` flags.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorMemoryAllocation`, :py:obj:`~.cudaErrorMapBufferObjectFailed`, :py:obj:`~.cudaErrorNotSupported`, :py:obj:`~.cudaErrorInvalidValue`\nhandle : :py:obj:`~.cudaIpcEventHandle_t`\n Pointer to a user allocated cudaIpcEventHandle in which to return\n the opaque event handle\n\nSee Also\n--------\n:py:obj:`~.cudaEventCreate`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaStreamWait""Event`, :py:obj:`~.cudaIpcOpenEventHandle`, :py:obj:`~.cudaIpcGetMemHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle`, :py:obj:`~.cuIpcGetEventHandle`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_21cudaIpcGetEventHandle = 
{"cudaIpcGetEventHandle", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_21cudaIpcGetEventHandle, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_20cudaIpcGetEventHandle}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_21cudaIpcGetEventHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_event = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaIpcGetEventHandle (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_event_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14379, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14379, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaIpcGetEventHandle", 0) < (0)) __PYX_ERR(0, 14379, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaIpcGetEventHandle", 1, 1, 1, i); __PYX_ERR(0, 14379, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14379, __pyx_L3_error) } __pyx_v_event = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaIpcGetEventHandle", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14379, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcGetEventHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_20cudaIpcGetEventHandle(__pyx_self, __pyx_v_event); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_20cudaIpcGetEventHandle(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_event) { cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcEventHandle_t *__pyx_v_handle = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaIpcGetEventHandle", 0); /* "cuda/bindings/runtime.pyx":14423 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_event == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14424 * cdef cyruntime.cudaEvent_t cyevent * if event is None: * pevent = 0 # <<<<<<<<<<<<<< * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pevent = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":14423 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":14425 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* 
"cuda/bindings/runtime.pyx":14426 * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) # <<<<<<<<<<<<<< * else: * pevent = int(cudaEvent_t(event)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_event); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14426, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pevent = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":14425 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":14428 * pevent = int(event) * else: * pevent = int(cudaEvent_t(event)) # <<<<<<<<<<<<<< * cyevent = pevent * cdef cudaIpcEventHandle_t handle = cudaIpcEventHandle_t() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_event}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14428, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14428, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pevent = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":14429 * else: * pevent = int(cudaEvent_t(event)) * cyevent = pevent # <<<<<<<<<<<<<< * cdef cudaIpcEventHandle_t handle = cudaIpcEventHandle_t() * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) 
__PYX_ERR(0, 14429, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":14430 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * cdef cudaIpcEventHandle_t handle = cudaIpcEventHandle_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaIpcGetEventHandle(handle._pvt_ptr, cyevent) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaIpcEventHandle_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaIpcEventHandle_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14430, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_handle = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcEventHandle_t *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":14431 * cyevent = pevent * cdef cudaIpcEventHandle_t handle = cudaIpcEventHandle_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcGetEventHandle(handle._pvt_ptr, cyevent) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14432 * cdef cudaIpcEventHandle_t handle = cudaIpcEventHandle_t() * with nogil: * err = cyruntime.cudaIpcGetEventHandle(handle._pvt_ptr, cyevent) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaIpcGetEventHandle(((cudaIpcEventHandle_t *)__pyx_v_handle->__pyx_base._pvt_ptr), __pyx_v_cyevent); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14432, 
__pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":14431 * cyevent = pevent * cdef cudaIpcEventHandle_t handle = cudaIpcEventHandle_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcGetEventHandle(handle._pvt_ptr, cyevent) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":14433 * with nogil: * err = cyruntime.cudaIpcGetEventHandle(handle._pvt_ptr, cyevent) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], handle) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14434 * err = cyruntime.cudaIpcGetEventHandle(handle._pvt_ptr, cyevent) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], handle) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14434, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 14434, __pyx_L1_error); __pyx_t_3 = 
0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14433 * with nogil: * err = cyruntime.cudaIpcGetEventHandle(handle._pvt_ptr, cyevent) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], handle) */ } /* "cuda/bindings/runtime.pyx":14435 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], handle) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14435, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 14435, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_handle); __Pyx_GIVEREF((PyObject *)__pyx_v_handle); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_handle)) != (0)) __PYX_ERR(0, 14435, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14379 * return (_dict_cudaError_t[err], pypciBusId) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcGetEventHandle(event): * """ Gets an interprocess handle for a previously allocated event. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcGetEventHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pevent); __Pyx_XDECREF((PyObject *)__pyx_v_handle); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14437 * return (_dict_cudaError_t[err], handle) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcOpenEventHandle(handle not None : cudaIpcEventHandle_t): * """ Opens an interprocess event handle for use in the current process. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_23cudaIpcOpenEventHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_22cudaIpcOpenEventHandle, "cudaIpcOpenEventHandle(cudaIpcEventHandle_t handle: cudaIpcEventHandle_t)\n\nOpens an interprocess event handle for use in the current process.\n\nOpens an interprocess event handle exported from another process with\n:py:obj:`~.cudaIpcGetEventHandle`. This function returns a\n:py:obj:`~.cudaEvent_t` that behaves like a locally created event with\nthe :py:obj:`~.cudaEventDisableTiming` flag specified. This event must\nbe freed with :py:obj:`~.cudaEventDestroy`.\n\nPerforming operations on the imported event after the exported event\nhas been freed with :py:obj:`~.cudaEventDestroy` will result in\nundefined behavior.\n\nIPC functionality is restricted to devices with support for unified\naddressing on Linux and Windows operating systems. IPC functionality on\nWindows is supported for compatibility purposes but not recommended as\nit comes with performance cost. 
 Users can test their device for IPC\nfunctionality by calling :py:obj:`~.cudaDeviceGetAttribute` with\n:py:obj:`~.cudaDevAttrIpcEventSupport`\n\nParameters\n----------\nhandle : :py:obj:`~.cudaIpcEventHandle_t`\n Interprocess handle to open\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorMapBufferObjectFailed`, :py:obj:`~.cudaErrorNotSupported`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorDeviceUninitialized`\nevent : :py:obj:`~.cudaEvent_t`\n Returns the imported event\n\nSee Also\n--------\n:py:obj:`~.cudaEventCreate`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcGetMemHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle`, :py:obj:`~.cuIpcOpenEventHandle`"); 
/* Method-table entry binding the Python name "cudaIpcOpenEventHandle" to its METH_FASTCALL|METH_KEYWORDS wrapper, with the docstring above. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_23cudaIpcOpenEventHandle = {"cudaIpcOpenEventHandle", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_23cudaIpcOpenEventHandle, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_22cudaIpcOpenEventHandle}; 
/* Fastcall wrapper: unpacks the single (handle) argument, enforces the `handle not None : cudaIpcEventHandle_t` annotation via __Pyx_ArgTypeTest (below), and dispatches to the __pyx_pf_ implementation; on error it DECREFs collected values, records a traceback, and returns NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_23cudaIpcOpenEventHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcEventHandle_t *__pyx_v_handle = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaIpcOpenEventHandle (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else 
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_handle_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14437, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14437, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaIpcOpenEventHandle", 0) < (0)) __PYX_ERR(0, 14437, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaIpcOpenEventHandle", 1, 1, 1, i); __PYX_ERR(0, 14437, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14437, __pyx_L3_error) } __pyx_v_handle = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcEventHandle_t *)values[0]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaIpcOpenEventHandle", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14437, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcOpenEventHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_handle), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaIpcEventHandle_t, 0, "handle", 0))) __PYX_ERR(0, 14438, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_22cudaIpcOpenEventHandle(__pyx_self, __pyx_v_handle); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } 
/* Implementation (continues beyond this chunk): constructs a fresh cudaEvent_t wrapper object, releases the GIL around cyruntime.cudaIpcOpenEventHandle(event._pvt_ptr, handle._pvt_ptr[0]), and returns (_dict_cudaError_t[err], event) — or (_dict_cudaError_t[err], None) when err != cudaSuccess. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaIpcOpenEventHandle(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcEventHandle_t *__pyx_v_handle) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaEvent_t *__pyx_v_event = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaIpcOpenEventHandle", 0); /* "cuda/bindings/runtime.pyx":14474 * :py:obj:`~.cudaEventCreate`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcGetMemHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle`, :py:obj:`~.cuIpcOpenEventHandle` * """ * cdef cudaEvent_t event = cudaEvent_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaIpcOpenEventHandle(event._pvt_ptr, handle._pvt_ptr[0]) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14474, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_event = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEvent_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":14475 * """ * cdef cudaEvent_t event = cudaEvent_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcOpenEventHandle(event._pvt_ptr, handle._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14476 * cdef cudaEvent_t event = cudaEvent_t() * with nogil: * err = cyruntime.cudaIpcOpenEventHandle(event._pvt_ptr, handle._pvt_ptr[0]) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaIpcOpenEventHandle(((cudaEvent_t *)__pyx_v_event->__pyx_base._pvt_ptr), (__pyx_v_handle->__pyx_base._pvt_ptr[0])); if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14476, __pyx_L4_error) __pyx_v_err = __pyx_t_5; } /* "cuda/bindings/runtime.pyx":14475 * """ * cdef cudaEvent_t event = cudaEvent_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcOpenEventHandle(event._pvt_ptr, handle._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } 
} /* "cuda/bindings/runtime.pyx":14477 * with nogil: * err = cyruntime.cudaIpcOpenEventHandle(event._pvt_ptr, handle._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":14478 * err = cyruntime.cudaIpcOpenEventHandle(event._pvt_ptr, handle._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], event) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14478, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14478, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14478, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14478, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 14478, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 14478, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14477 * with nogil: * err = cyruntime.cudaIpcOpenEventHandle(event._pvt_ptr, handle._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event) */ } /* "cuda/bindings/runtime.pyx":14479 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return 
(_dict_cudaError_t[err], event) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 14479, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_event); __Pyx_GIVEREF((PyObject *)__pyx_v_event); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_event)) != (0)) __PYX_ERR(0, 14479, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14437 * return (_dict_cudaError_t[err], handle) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcOpenEventHandle(handle not None : cudaIpcEventHandle_t): * """ Opens an interprocess event handle for use in the current process. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcOpenEventHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_event); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14481 * return (_dict_cudaError_t[err], event) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcGetMemHandle(devPtr): * """ Gets an interprocess memory handle for an existing device memory allocation. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaIpcGetMemHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_24cudaIpcGetMemHandle, "cudaIpcGetMemHandle(devPtr)\n\nGets an interprocess memory handle for an existing device memory allocation.\n\nTakes a pointer to the base of an existing device memory allocation\ncreated with :py:obj:`~.cudaMalloc` and exports it for use in another\nprocess. This is a lightweight operation and may be called multiple\ntimes on an allocation without adverse effects.\n\nIf a region of memory is freed with :py:obj:`~.cudaFree` and a\nsubsequent call to :py:obj:`~.cudaMalloc` returns memory with the same\ndevice address, :py:obj:`~.cudaIpcGetMemHandle` will return a unique\nhandle for the new memory.\n\nIPC functionality is restricted to devices with support for unified\naddressing on Linux and Windows operating systems. IPC functionality on\nWindows is supported for compatibility purposes but not recommended as\nit comes with performance cost. 
Users can test their device for IPC\nfunctionality by calling :py:obj:`~.cudaDeviceGetAttribute` with\n:py:obj:`~.cudaDevAttrIpcEventSupport`\n\nParameters\n----------\ndevPtr : Any\n Base pointer to previously allocated device memory\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorMemoryAllocation`, :py:obj:`~.cudaErrorMapBufferObjectFailed`, :py:obj:`~.cudaErrorNotSupported`, :py:obj:`~.cudaErrorInvalidValue`\nhandle : :py:obj:`~.cudaIpcMemHandle_t`\n Pointer to user allocated :py:obj:`~.cudaIpcMemHandle` to return\n the handle in.\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcOpenEventHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle`, :py:obj:`~.cuIpcGetMemHandle`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_25cudaIpcGetMemHandle = {"cudaIpcGetMemHandle", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_25cudaIpcGetMemHandle, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_24cudaIpcGetMemHandle}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_25cudaIpcGetMemHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaIpcGetMemHandle (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14481, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14481, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaIpcGetMemHandle", 0) < (0)) __PYX_ERR(0, 14481, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaIpcGetMemHandle", 1, 1, 1, i); __PYX_ERR(0, 14481, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14481, __pyx_L3_error) } __pyx_v_devPtr = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaIpcGetMemHandle", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14481, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcGetMemHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_24cudaIpcGetMemHandle(__pyx_self, __pyx_v_devPtr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); 
++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaIpcGetMemHandle(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcMemHandle_t *__pyx_v_handle = 0; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaIpcGetMemHandle", 0); /* "cuda/bindings/runtime.pyx":14519 * :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcOpenEventHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle`, :py:obj:`~.cuIpcGetMemHandle` * """ * cdef cudaIpcMemHandle_t handle = cudaIpcMemHandle_t() # <<<<<<<<<<<<<< * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaIpcMemHandle_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaIpcMemHandle_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14519, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_handle = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcMemHandle_t *)__pyx_t_1); __pyx_t_1 = 0; /* 
"cuda/bindings/runtime.pyx":14520 * """ * cdef cudaIpcMemHandle_t handle = cudaIpcMemHandle_t() * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_devPtr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14520, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":14521 * cdef cudaIpcMemHandle_t handle = cudaIpcMemHandle_t() * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaIpcGetMemHandle(handle._pvt_ptr, cydevPtr_ptr) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14521, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 14521, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":14522 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcGetMemHandle(handle._pvt_ptr, cydevPtr_ptr) * if err != cyruntime.cudaSuccess: */ { 
PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14523 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaIpcGetMemHandle(handle._pvt_ptr, cydevPtr_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaIpcGetMemHandle(((cudaIpcMemHandle_t *)__pyx_v_handle->__pyx_base._pvt_ptr), __pyx_v_cydevPtr_ptr); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14523, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":14522 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcGetMemHandle(handle._pvt_ptr, cydevPtr_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14524 * with nogil: * err = cyruntime.cudaIpcGetMemHandle(handle._pvt_ptr, cydevPtr_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], handle) */ __pyx_t_7 = (__pyx_v_err != cudaSuccess); if (__pyx_t_7) { /* "cuda/bindings/runtime.pyx":14525 * err = cyruntime.cudaIpcGetMemHandle(handle._pvt_ptr, cydevPtr_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], handle) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14525, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14525, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14525, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14525, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14525, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None) != (0)) __PYX_ERR(0, 14525, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14524 * with nogil: * err = cyruntime.cudaIpcGetMemHandle(handle._pvt_ptr, cydevPtr_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], handle) */ } /* "cuda/bindings/runtime.pyx":14526 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], handle) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 14526, __pyx_L1_error); __Pyx_INCREF((PyObject 
*)__pyx_v_handle); __Pyx_GIVEREF((PyObject *)__pyx_v_handle); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_handle)) != (0)) __PYX_ERR(0, 14526, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14481 * return (_dict_cudaError_t[err], event) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcGetMemHandle(devPtr): * """ Gets an interprocess memory handle for an existing device memory allocation. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcGetMemHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_handle); __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14528 * return (_dict_cudaError_t[err], handle) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcOpenMemHandle(handle not None : cudaIpcMemHandle_t, unsigned int flags): * """ Opens an interprocess memory handle exported from another process and returns a device pointer usable in the local process. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_27cudaIpcOpenMemHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_26cudaIpcOpenMemHandle, "cudaIpcOpenMemHandle(cudaIpcMemHandle_t handle: cudaIpcMemHandle_t, unsigned int flags)\n\nOpens an interprocess memory handle exported from another process and returns a device pointer usable in the local process.\n\nMaps memory exported from another process with\n:py:obj:`~.cudaIpcGetMemHandle` into the current device address space.\nFor contexts on different devices :py:obj:`~.cudaIpcOpenMemHandle` can\nattempt to enable peer access between the devices as if the user called\n:py:obj:`~.cudaDeviceEnablePeerAccess`. This behavior is controlled by\nthe :py:obj:`~.cudaIpcMemLazyEnablePeerAccess` flag.\n:py:obj:`~.cudaDeviceCanAccessPeer` can determine if a mapping is\npossible.\n\n:py:obj:`~.cudaIpcOpenMemHandle` can open handles to devices that may\nnot be visible in the process calling the API.\n\nContexts that may open :py:obj:`~.cudaIpcMemHandles` are restricted in\nthe following way. :py:obj:`~.cudaIpcMemHandles` from each device in a\ngiven process may only be opened by one context per device per other\nprocess.\n\nIf the memory handle has already been opened by the current context,\nthe reference count on the handle is incremented by 1 and the existing\ndevice pointer is returned.\n\nMemory returned from :py:obj:`~.cudaIpcOpenMemHandle` must be freed\nwith :py:obj:`~.cudaIpcCloseMemHandle`.\n\nCalling :py:obj:`~.cudaFree` on an exported memory region before\ncalling :py:obj:`~.cudaIpcCloseMemHandle` in the importing context will\nresult in undefined behavior.\n\nIPC functionality is restricted to devices with support for unified\naddressing on Linux and Windows operating systems. 
IPC functionality on\nWindows is supported for compatibility purposes but not recommended as\nit comes with performance cost. Users can test their device for IPC\nfunctionality by calling :py:obj:`~.cudaDeviceGetAttribute` with\n:py:obj:`~.cudaDevAttrIpcEventSupport`\n\nParameters\n----------\nhandle : :py:obj:`~.cudaIpcMemHandle_t`\n :py:obj:`~.cudaIpcMemHandle` to open\nflags : u""nsigned int\n Flags for this operation. Must be specified as\n :py:obj:`~.cudaIpcMemLazyEnablePeerAccess`\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorMapBufferObjectFailed`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorDeviceUninitialized`, :py:obj:`~.cudaErrorTooManyPeers`, :py:obj:`~.cudaErrorNotSupported`, :py:obj:`~.cudaErrorInvalidValue`\ndevPtr : Any\n Returned device pointer\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcOpenEventHandle`, :py:obj:`~.cudaIpcGetMemHandle`, :py:obj:`~.cudaIpcCloseMemHandle`, :py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cuIpcOpenMemHandle`\n\nNotes\n-----\nNo guarantees are made about the address returned in `*devPtr`. 
\n In particular, multiple processes may not receive the same address for the same `handle`."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_27cudaIpcOpenMemHandle = {"cudaIpcOpenMemHandle", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_27cudaIpcOpenMemHandle, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_26cudaIpcOpenMemHandle}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_27cudaIpcOpenMemHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcMemHandle_t *__pyx_v_handle = 0; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaIpcOpenMemHandle (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_handle_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14528, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14528, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14528, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaIpcOpenMemHandle", 0) < (0)) __PYX_ERR(0, 14528, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaIpcOpenMemHandle", 1, 2, 2, i); __PYX_ERR(0, 14528, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14528, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14528, __pyx_L3_error) } __pyx_v_handle = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcMemHandle_t *)values[0]); __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14529, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaIpcOpenMemHandle", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 14528, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
__Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcOpenMemHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_handle), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaIpcMemHandle_t, 0, "handle", 0))) __PYX_ERR(0, 14529, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_26cudaIpcOpenMemHandle(__pyx_self, __pyx_v_handle, __pyx_v_flags); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_26cudaIpcOpenMemHandle(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaIpcMemHandle_t *__pyx_v_handle, unsigned int __pyx_v_flags) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_devPtr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaIpcOpenMemHandle", 0); /* "cuda/bindings/runtime.pyx":14591 * In particular, multiple processes may not receive the same address for the same `handle`. 
* """ * cdef void_ptr devPtr = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaIpcOpenMemHandle(&devPtr, handle._pvt_ptr[0], flags) */ __pyx_v_devPtr = 0; /* "cuda/bindings/runtime.pyx":14592 * """ * cdef void_ptr devPtr = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcOpenMemHandle(&devPtr, handle._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14593 * cdef void_ptr devPtr = 0 * with nogil: * err = cyruntime.cudaIpcOpenMemHandle(&devPtr, handle._pvt_ptr[0], flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaIpcOpenMemHandle(((void **)(&__pyx_v_devPtr)), (__pyx_v_handle->__pyx_base._pvt_ptr[0]), __pyx_v_flags); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14593, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":14592 * """ * cdef void_ptr devPtr = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcOpenMemHandle(&devPtr, handle._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14594 * with nogil: * err = cyruntime.cudaIpcOpenMemHandle(&devPtr, handle._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":14595 * err = cyruntime.cudaIpcOpenMemHandle(&devPtr, handle._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return 
(_dict_cudaError_t[err], devPtr) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14595, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14595, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14595, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14595, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 14595, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 14595, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14594 * with nogil: * err = cyruntime.cudaIpcOpenMemHandle(&devPtr, handle._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) */ } /* "cuda/bindings/runtime.pyx":14596 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14596, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_devPtr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14596, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14596, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 14596, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14528 * return (_dict_cudaError_t[err], handle) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcOpenMemHandle(handle not None : cudaIpcMemHandle_t, unsigned int flags): * """ Opens an interprocess memory handle exported from another process and returns a device pointer usable in the local process. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcOpenMemHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14598 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcCloseMemHandle(devPtr): * """ Attempts to close memory mapped with cudaIpcOpenMemHandle. 
*/ /* NOTE(review): machine-generated by Cython (see the embedded "cuda/bindings/runtime.pyx" origin banners); fix behavior in the .pyx and regenerate — do not hand-edit this C. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_29cudaIpcCloseMemHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_28cudaIpcCloseMemHandle, "cudaIpcCloseMemHandle(devPtr)\n\nAttempts to close memory mapped with cudaIpcOpenMemHandle.\n\nDecrements the reference count of the memory returnd by\n:py:obj:`~.cudaIpcOpenMemHandle` by 1. When the reference count reaches\n0, this API unmaps the memory. The original allocation in the exporting\nprocess as well as imported mappings in other processes will be\nunaffected.\n\nAny resources used to enable peer access will be freed if this is the\nlast mapping using them.\n\nIPC functionality is restricted to devices with support for unified\naddressing on Linux and Windows operating systems. IPC functionality on\nWindows is supported for compatibility purposes but not recommended as\nit comes with performance cost. 
Users can test their device for IPC\nfunctionality by calling :py:obj:`~.cudaDeviceGetAttribute` with\n:py:obj:`~.cudaDevAttrIpcEventSupport`\n\nParameters\n----------\ndevPtr : Any\n Device pointer returned by :py:obj:`~.cudaIpcOpenMemHandle`\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorMapBufferObjectFailed`, :py:obj:`~.cudaErrorNotSupported`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcOpenEventHandle`, :py:obj:`~.cudaIpcGetMemHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cuIpcCloseMemHandle`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_29cudaIpcCloseMemHandle = {"cudaIpcCloseMemHandle", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_29cudaIpcCloseMemHandle, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_28cudaIpcCloseMemHandle}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_29cudaIpcCloseMemHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaIpcCloseMemHandle (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,0}; const Py_ssize_t 
__pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14598, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14598, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaIpcCloseMemHandle", 0) < (0)) __PYX_ERR(0, 14598, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaIpcCloseMemHandle", 1, 1, 1, i); __PYX_ERR(0, 14598, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14598, __pyx_L3_error) } __pyx_v_devPtr = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaIpcCloseMemHandle", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14598, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcCloseMemHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_28cudaIpcCloseMemHandle(__pyx_self, __pyx_v_devPtr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject
/* NOTE(review): impl body below wraps devPtr in _HelperInputVoidPtr, reads its .cptr as an integer, calls cyruntime.cudaIpcCloseMemHandle with the GIL released (Py_UNBLOCK_THREADS), and returns the 1-tuple (_dict_cudaError_t[err],). */
*__pyx_pf_4cuda_8bindings_7runtime_28cudaIpcCloseMemHandle(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaIpcCloseMemHandle", 0); /* "cuda/bindings/runtime.pyx":14632 * :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaIpcGetEventHandle`, :py:obj:`~.cudaIpcOpenEventHandle`, :py:obj:`~.cudaIpcGetMemHandle`, :py:obj:`~.cudaIpcOpenMemHandle`, :py:obj:`~.cuIpcCloseMemHandle` * """ * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_devPtr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14632, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":14633 * """ * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaIpcCloseMemHandle(cydevPtr_ptr) */ __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14633, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 14633, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":14634 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcCloseMemHandle(cydevPtr_ptr) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14635 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaIpcCloseMemHandle(cydevPtr_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaIpcCloseMemHandle(__pyx_v_cydevPtr_ptr); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14635, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":14634 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaIpcCloseMemHandle(cydevPtr_ptr) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14636 * with nogil: * err = cyruntime.cudaIpcCloseMemHandle(cydevPtr_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, 
__pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 14636, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14598 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaIpcCloseMemHandle(devPtr): * """ Attempts to close memory mapped with cudaIpcOpenMemHandle. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaIpcCloseMemHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14638 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceFlushGPUDirectRDMAWrites(target not None : cudaFlushGPUDirectRDMAWritesTarget, scope not None : cudaFlushGPUDirectRDMAWritesScope): * """ Blocks until remote writes are visible to the specified scope. 
*/ /* NOTE(review): Cython-generated wrapper+impl for cudaDeviceFlushGPUDirectRDMAWrites. The wrapper unpacks (target, scope) and rejects None for both; the impl reads each argument's .value attribute into the corresponding cyruntime enum, calls cudaDeviceFlushGPUDirectRDMAWrites with the GIL released, and returns (_dict_cudaError_t[err],). Regenerate from the .pyx instead of editing here. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_31cudaDeviceFlushGPUDirectRDMAWrites(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_30cudaDeviceFlushGPUDirectRDMAWrites, "cudaDeviceFlushGPUDirectRDMAWrites(target: cudaFlushGPUDirectRDMAWritesTarget, scope: cudaFlushGPUDirectRDMAWritesScope)\n\nBlocks until remote writes are visible to the specified scope.\n\nBlocks until remote writes to the target context via mappings created\nthrough GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see\nhttps://docs.nvidia.com/cuda/gpudirect-rdma for more information), are\nvisible to the specified scope.\n\nIf the scope equals or lies within the scope indicated by\n:py:obj:`~.cudaDevAttrGPUDirectRDMAWritesOrdering`, the call will be a\nno-op and can be safely omitted for performance. This can be determined\nby comparing the numerical values between the two enums, with smaller\nscopes having smaller values.\n\nUsers may query support for this API via\n:py:obj:`~.cudaDevAttrGPUDirectRDMAFlushWritesOptions`.\n\nParameters\n----------\ntarget : :py:obj:`~.cudaFlushGPUDirectRDMAWritesTarget`\n The target of the operation, see cudaFlushGPUDirectRDMAWritesTarget\nscope : :py:obj:`~.cudaFlushGPUDirectRDMAWritesScope`\n The scope of the operation, see cudaFlushGPUDirectRDMAWritesScope\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorNotSupported`,\n\nSee Also\n--------\n:py:obj:`~.cuFlushGPUDirectRDMAWrites`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_31cudaDeviceFlushGPUDirectRDMAWrites = {"cudaDeviceFlushGPUDirectRDMAWrites", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_31cudaDeviceFlushGPUDirectRDMAWrites, __Pyx_METH_FASTCALL|METH_KEYWORDS, 
__pyx_doc_4cuda_8bindings_7runtime_30cudaDeviceFlushGPUDirectRDMAWrites}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_31cudaDeviceFlushGPUDirectRDMAWrites(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_target = 0; PyObject *__pyx_v_scope = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceFlushGPUDirectRDMAWrites (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_target,&__pyx_mstate_global->__pyx_n_u_scope,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14638, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14638, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14638, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceFlushGPUDirectRDMAWrites", 0) < (0)) __PYX_ERR(0, 14638, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceFlushGPUDirectRDMAWrites", 1, 2, 2, i); __PYX_ERR(0, 14638, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14638, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14638, __pyx_L3_error) } __pyx_v_target = values[0]; __pyx_v_scope = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceFlushGPUDirectRDMAWrites", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 14638, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceFlushGPUDirectRDMAWrites", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; 
if (unlikely(((PyObject *)__pyx_v_target) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "target"); __PYX_ERR(0, 14639, __pyx_L1_error) } if (unlikely(((PyObject *)__pyx_v_scope) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "scope"); __PYX_ERR(0, 14639, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_30cudaDeviceFlushGPUDirectRDMAWrites(__pyx_self, __pyx_v_target, __pyx_v_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_30cudaDeviceFlushGPUDirectRDMAWrites(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_target, PyObject *__pyx_v_scope) { enum cudaFlushGPUDirectRDMAWritesTarget __pyx_v_cytarget; enum cudaFlushGPUDirectRDMAWritesScope __pyx_v_cyscope; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaFlushGPUDirectRDMAWritesTarget __pyx_t_2; enum cudaFlushGPUDirectRDMAWritesScope __pyx_t_3; cudaError_t __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceFlushGPUDirectRDMAWrites", 0); /* "cuda/bindings/runtime.pyx":14672 * :py:obj:`~.cuFlushGPUDirectRDMAWrites` * """ * cdef cyruntime.cudaFlushGPUDirectRDMAWritesTarget cytarget = target.value # <<<<<<<<<<<<<< * cdef cyruntime.cudaFlushGPUDirectRDMAWritesScope cyscope = scope.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_target, 
__pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaFlushGPUDirectRDMAWritesTarget)__Pyx_PyLong_As_enum__cudaFlushGPUDirectRDMAWritesTarget(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 14672, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cytarget = __pyx_t_2; /* "cuda/bindings/runtime.pyx":14673 * """ * cdef cyruntime.cudaFlushGPUDirectRDMAWritesTarget cytarget = target.value * cdef cyruntime.cudaFlushGPUDirectRDMAWritesScope cyscope = scope.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceFlushGPUDirectRDMAWrites(cytarget, cyscope) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_scope, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((enum cudaFlushGPUDirectRDMAWritesScope)__Pyx_PyLong_As_enum__cudaFlushGPUDirectRDMAWritesScope(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 14673, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyscope = __pyx_t_3; /* "cuda/bindings/runtime.pyx":14674 * cdef cyruntime.cudaFlushGPUDirectRDMAWritesTarget cytarget = target.value * cdef cyruntime.cudaFlushGPUDirectRDMAWritesScope cyscope = scope.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceFlushGPUDirectRDMAWrites(cytarget, cyscope) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14675 * cdef cyruntime.cudaFlushGPUDirectRDMAWritesScope cyscope = scope.value * with nogil: * err = cyruntime.cudaDeviceFlushGPUDirectRDMAWrites(cytarget, cyscope) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_4 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceFlushGPUDirectRDMAWrites(__pyx_v_cytarget, __pyx_v_cyscope); if (unlikely(__pyx_t_4 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && 
__Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14675, __pyx_L4_error) __pyx_v_err = __pyx_t_4; } /* "cuda/bindings/runtime.pyx":14674 * cdef cyruntime.cudaFlushGPUDirectRDMAWritesTarget cytarget = target.value * cdef cyruntime.cudaFlushGPUDirectRDMAWritesScope cyscope = scope.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceFlushGPUDirectRDMAWrites(cytarget, cyscope) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14676 * with nogil: * err = cyruntime.cudaDeviceFlushGPUDirectRDMAWrites(cytarget, cyscope) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * ctypedef struct cudaAsyncCallbackData_st: */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 14676, __pyx_L1_error); __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14638 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceFlushGPUDirectRDMAWrites(target not None : cudaFlushGPUDirectRDMAWritesTarget, scope not None : cudaFlushGPUDirectRDMAWritesScope): * """ Blocks until 
remote writes are visible to the specified scope. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceFlushGPUDirectRDMAWrites", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14684 * ctypedef cudaAsyncCallbackData_st cudaAsyncCallbackData * * @cython.show_performance_hints(False) # <<<<<<<<<<<<<< * cdef void cudaAsyncNotificationCallbackWrapper(cyruntime.cudaAsyncNotificationInfo_t *info, void *data, cyruntime.cudaAsyncCallbackHandle_t handle) nogil: * cdef cudaAsyncCallbackData *cbData = data */ static void __pyx_f_4cuda_8bindings_7runtime_cudaAsyncNotificationCallbackWrapper(cudaAsyncNotificationInfo_t *__pyx_v_info, void *__pyx_v_data, cudaAsyncCallbackHandle_t __pyx_v_handle) { __pyx_t_4cuda_8bindings_7runtime_cudaAsyncCallbackData *__pyx_v_cbData; /* "cuda/bindings/runtime.pyx":14686 * @cython.show_performance_hints(False) * cdef void cudaAsyncNotificationCallbackWrapper(cyruntime.cudaAsyncNotificationInfo_t *info, void *data, cyruntime.cudaAsyncCallbackHandle_t handle) nogil: * cdef cudaAsyncCallbackData *cbData = data # <<<<<<<<<<<<<< * with gil: * cbData.callback(info, cbData.userData, handle) */ __pyx_v_cbData = ((__pyx_t_4cuda_8bindings_7runtime_cudaAsyncCallbackData *)__pyx_v_data); /* "cuda/bindings/runtime.pyx":14687 * cdef void cudaAsyncNotificationCallbackWrapper(cyruntime.cudaAsyncNotificationInfo_t *info, void *data, cyruntime.cudaAsyncCallbackHandle_t handle) nogil: * cdef cudaAsyncCallbackData *cbData = data * with gil: # <<<<<<<<<<<<<< * cbData.callback(info, cbData.userData, handle) * */ { PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14688 * cdef cudaAsyncCallbackData *cbData = data * with gil: * cbData.callback(info, 
cbData.userData, handle) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ /* NOTE(review): the user-supplied callback is invoked here while holding the GIL (PyGILState_Ensure in the enclosing block above). Generated Cython code — edit the .pyx, not this file. */ __pyx_v_cbData->callback(__pyx_v_info, __pyx_v_cbData->userData, __pyx_v_handle); } /* "cuda/bindings/runtime.pyx":14687 * cdef void cudaAsyncNotificationCallbackWrapper(cyruntime.cudaAsyncNotificationInfo_t *info, void *data, cyruntime.cudaAsyncCallbackHandle_t handle) nogil: * cdef cudaAsyncCallbackData *cbData = data * with gil: # <<<<<<<<<<<<<< * cbData.callback(info, cbData.userData, handle) * */ /*finally:*/ { /*normal exit:*/{ __Pyx_PyGILState_Release(__pyx_gilstate_save); goto __pyx_L5; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14684 * ctypedef cudaAsyncCallbackData_st cudaAsyncCallbackData * * @cython.show_performance_hints(False) # <<<<<<<<<<<<<< * cdef void cudaAsyncNotificationCallbackWrapper(cyruntime.cudaAsyncNotificationInfo_t *info, void *data, cyruntime.cudaAsyncCallbackHandle_t handle) nogil: * cdef cudaAsyncCallbackData *cbData = data */ /* function exit code */ } /* "cuda/bindings/runtime.pyx":14690 * cbData.callback(info, cbData.userData, handle) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceRegisterAsyncNotification(int device, callbackFunc, userData): * """ Registers a callback function to receive async notifications. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_33cudaDeviceRegisterAsyncNotification(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_32cudaDeviceRegisterAsyncNotification, "cudaDeviceRegisterAsyncNotification(int device, callbackFunc, userData)\n\nRegisters a callback function to receive async notifications.\n\nRegisters `callbackFunc` to receive async notifications.\n\nThe `userData` parameter is passed to the callback function at async\nnotification time. 
Likewise, `callback` is also passed to the callback\nfunction to distinguish between multiple registered callbacks.\n\nThe callback function being registered should be designed to return\nquickly (~10ms). Any long running tasks should be queued for execution\non an application thread.\n\nCallbacks may not call cudaDeviceRegisterAsyncNotification or\ncudaDeviceUnregisterAsyncNotification. Doing so will result in\n:py:obj:`~.cudaErrorNotPermitted`. Async notification callbacks execute\nin an undefined order and may be serialized.\n\nReturns in `*callback` a handle representing the registered callback\ninstance.\n\nParameters\n----------\ndevice : int\n The device on which to register the callback\ncallbackFunc : :py:obj:`~.cudaAsyncCallback`\n The function to register as a callback\nuserData : Any\n A generic pointer to user data. This is passed into the callback\n function.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess` :py:obj:`~.cudaErrorNotSupported` :py:obj:`~.cudaErrorInvalidDevice` :py:obj:`~.cudaErrorInvalidValue` :py:obj:`~.cudaErrorNotPermitted` :py:obj:`~.cudaErrorUnknown`\ncallback : :py:obj:`~.cudaAsyncCallbackHandle_t`\n A handle representing the registered callback instance\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceUnregisterAsyncNotification`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_33cudaDeviceRegisterAsyncNotification = {"cudaDeviceRegisterAsyncNotification", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_33cudaDeviceRegisterAsyncNotification, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_32cudaDeviceRegisterAsyncNotification}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_33cudaDeviceRegisterAsyncNotification(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_device; PyObject 
/* locals for the three unpacked arguments: device (C int), callbackFunc, userData */
*__pyx_v_callbackFunc = 0; PyObject *__pyx_v_userData = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceRegisterAsyncNotification (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_callbackFunc,&__pyx_mstate_global->__pyx_n_u_userData_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14690, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14690, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14690, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14690, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceRegisterAsyncNotification", 0) < (0)) __PYX_ERR(0, 14690, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { 
__Pyx_RaiseArgtupleInvalid("cudaDeviceRegisterAsyncNotification", 1, 3, 3, i); __PYX_ERR(0, 14690, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14690, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14690, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 14690, __pyx_L3_error) } __pyx_v_device = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14691, __pyx_L3_error) __pyx_v_callbackFunc = values[1]; __pyx_v_userData = values[2]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceRegisterAsyncNotification", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 14690, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceRegisterAsyncNotification", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_32cudaDeviceRegisterAsyncNotification(__pyx_self, __pyx_v_device, __pyx_v_callbackFunc, __pyx_v_userData); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_32cudaDeviceRegisterAsyncNotification(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_device, PyObject *__pyx_v_callbackFunc, PyObject 
*__pyx_v_userData) { cudaAsyncCallback __pyx_v_cycallbackFunc; PyObject *__pyx_v_pcallbackFunc = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyuserData = NULL; void *__pyx_v_cyuserData_ptr; __pyx_t_4cuda_8bindings_7runtime_cudaAsyncCallbackData *__pyx_v_cbData; struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncCallbackHandle_t *__pyx_v_callback = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceRegisterAsyncNotification", 0); /* "cuda/bindings/runtime.pyx":14734 * """ * cdef cyruntime.cudaAsyncCallback cycallbackFunc * if callbackFunc is None: # <<<<<<<<<<<<<< * pcallbackFunc = 0 * elif isinstance(callbackFunc, (cudaAsyncCallback,)): */ __pyx_t_1 = (__pyx_v_callbackFunc == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14735 * cdef cyruntime.cudaAsyncCallback cycallbackFunc * if callbackFunc is None: * pcallbackFunc = 0 # <<<<<<<<<<<<<< * elif isinstance(callbackFunc, (cudaAsyncCallback,)): * pcallbackFunc = int(callbackFunc) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pcallbackFunc = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":14734 * """ * cdef cyruntime.cudaAsyncCallback cycallbackFunc * if callbackFunc is None: # <<<<<<<<<<<<<< * pcallbackFunc = 0 * elif isinstance(callbackFunc, (cudaAsyncCallback,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":14736 * if callbackFunc is None: * pcallbackFunc = 0 * elif isinstance(callbackFunc, (cudaAsyncCallback,)): # <<<<<<<<<<<<<< * pcallbackFunc = int(callbackFunc) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_callbackFunc, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAsyncCallback); 
if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14737 * pcallbackFunc = 0 * elif isinstance(callbackFunc, (cudaAsyncCallback,)): * pcallbackFunc = int(callbackFunc) # <<<<<<<<<<<<<< * else: * pcallbackFunc = int(cudaAsyncCallback(callbackFunc)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_callbackFunc); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14737, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pcallbackFunc = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":14736 * if callbackFunc is None: * pcallbackFunc = 0 * elif isinstance(callbackFunc, (cudaAsyncCallback,)): # <<<<<<<<<<<<<< * pcallbackFunc = int(callbackFunc) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":14739 * pcallbackFunc = int(callbackFunc) * else: * pcallbackFunc = int(cudaAsyncCallback(callbackFunc)) # <<<<<<<<<<<<<< * cycallbackFunc = pcallbackFunc * cyuserData = _HelperInputVoidPtr(userData) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAsyncCallback); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAsyncCallback); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_callbackFunc}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14739, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14739, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pcallbackFunc = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":14740 * else: * pcallbackFunc = int(cudaAsyncCallback(callbackFunc)) * cycallbackFunc = pcallbackFunc # <<<<<<<<<<<<<< * cyuserData = 
_HelperInputVoidPtr(userData) * cdef void* cyuserData_ptr = cyuserData.cptr */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pcallbackFunc); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 14740, __pyx_L1_error) __pyx_v_cycallbackFunc = ((cudaAsyncCallback)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":14741 * pcallbackFunc = int(cudaAsyncCallback(callbackFunc)) * cycallbackFunc = pcallbackFunc * cyuserData = _HelperInputVoidPtr(userData) # <<<<<<<<<<<<<< * cdef void* cyuserData_ptr = cyuserData.cptr * */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_userData}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14741, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_cyuserData = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":14742 * cycallbackFunc = pcallbackFunc * cyuserData = _HelperInputVoidPtr(userData) * cdef void* cyuserData_ptr = cyuserData.cptr # <<<<<<<<<<<<<< * * cdef cudaAsyncCallbackData *cbData = NULL */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyuserData), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 14742, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
__pyx_v_cyuserData_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":14744 * cdef void* cyuserData_ptr = cyuserData.cptr * * cdef cudaAsyncCallbackData *cbData = NULL # <<<<<<<<<<<<<< * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: */ __pyx_v_cbData = NULL; /* "cuda/bindings/runtime.pyx":14745 * * cdef cudaAsyncCallbackData *cbData = NULL * cbData = malloc(sizeof(cbData[0])) # <<<<<<<<<<<<<< * if cbData == NULL: * return (cudaError_t.cudaErrorMemoryAllocation, None) */ __pyx_v_cbData = ((__pyx_t_4cuda_8bindings_7runtime_cudaAsyncCallbackData *)malloc((sizeof((__pyx_v_cbData[0]))))); /* "cuda/bindings/runtime.pyx":14746 * cdef cudaAsyncCallbackData *cbData = NULL * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: # <<<<<<<<<<<<<< * return (cudaError_t.cudaErrorMemoryAllocation, None) * cbData.callback = cycallbackFunc */ __pyx_t_1 = (__pyx_v_cbData == NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14747 * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: * return (cudaError_t.cudaErrorMemoryAllocation, None) # <<<<<<<<<<<<<< * cbData.callback = cycallbackFunc * cbData.userData = cyuserData_ptr */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14747, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_cudaErrorMemoryAllocation); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14747, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14747, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14747, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 14747, 
__pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14746 * cdef cudaAsyncCallbackData *cbData = NULL * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: # <<<<<<<<<<<<<< * return (cudaError_t.cudaErrorMemoryAllocation, None) * cbData.callback = cycallbackFunc */ } /* "cuda/bindings/runtime.pyx":14748 * if cbData == NULL: * return (cudaError_t.cudaErrorMemoryAllocation, None) * cbData.callback = cycallbackFunc # <<<<<<<<<<<<<< * cbData.userData = cyuserData_ptr * */ __pyx_v_cbData->callback = __pyx_v_cycallbackFunc; /* "cuda/bindings/runtime.pyx":14749 * return (cudaError_t.cudaErrorMemoryAllocation, None) * cbData.callback = cycallbackFunc * cbData.userData = cyuserData_ptr # <<<<<<<<<<<<<< * * cdef cudaAsyncCallbackHandle_t callback = cudaAsyncCallbackHandle_t() */ __pyx_v_cbData->userData = __pyx_v_cyuserData_ptr; /* "cuda/bindings/runtime.pyx":14751 * cbData.userData = cyuserData_ptr * * cdef cudaAsyncCallbackHandle_t callback = cudaAsyncCallbackHandle_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceRegisterAsyncNotification(device, cudaAsyncNotificationCallbackWrapper, cbData, callback._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAsyncCallbackHandle_t); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAsyncCallbackHandle_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14751, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_callback = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaAsyncCallbackHandle_t *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":14752 * * cdef 
cudaAsyncCallbackHandle_t callback = cudaAsyncCallbackHandle_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceRegisterAsyncNotification(device, cudaAsyncNotificationCallbackWrapper, cbData, callback._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14753 * cdef cudaAsyncCallbackHandle_t callback = cudaAsyncCallbackHandle_t() * with nogil: * err = cyruntime.cudaDeviceRegisterAsyncNotification(device, cudaAsyncNotificationCallbackWrapper, cbData, callback._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * free(cbData) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceRegisterAsyncNotification(__pyx_v_device, ((cudaAsyncCallback)__pyx_f_4cuda_8bindings_7runtime_cudaAsyncNotificationCallbackWrapper), ((void *)__pyx_v_cbData), ((cudaAsyncCallbackHandle_t *)__pyx_v_callback->_pvt_ptr)); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14753, __pyx_L6_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":14752 * * cdef cudaAsyncCallbackHandle_t callback = cudaAsyncCallbackHandle_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceRegisterAsyncNotification(device, cudaAsyncNotificationCallbackWrapper, cbData, callback._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L7; } __pyx_L6_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L7:; } } /* "cuda/bindings/runtime.pyx":14754 * with nogil: * err = cyruntime.cudaDeviceRegisterAsyncNotification(device, cudaAsyncNotificationCallbackWrapper, cbData, callback._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * free(cbData) * else: */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14755 * err = 
cyruntime.cudaDeviceRegisterAsyncNotification(device, cudaAsyncNotificationCallbackWrapper, cbData, callback._pvt_ptr) * if err != cyruntime.cudaSuccess: * free(cbData) # <<<<<<<<<<<<<< * else: * m_global._allocated[int(callback)] = cbData */ free(__pyx_v_cbData); /* "cuda/bindings/runtime.pyx":14754 * with nogil: * err = cyruntime.cudaDeviceRegisterAsyncNotification(device, cudaAsyncNotificationCallbackWrapper, cbData, callback._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * free(cbData) * else: */ goto __pyx_L8; } /* "cuda/bindings/runtime.pyx":14757 * free(cbData) * else: * m_global._allocated[int(callback)] = cbData # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ /*else*/ { __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_v_callback)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14757, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 14757, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; (__pyx_v_4cuda_8bindings_7runtime_m_global->_allocated[__pyx_t_6]) = __pyx_v_cbData; } __pyx_L8:; /* "cuda/bindings/runtime.pyx":14758 * else: * m_global._allocated[int(callback)] = cbData * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], callback) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14759 * m_global._allocated[int(callback)] = cbData * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], callback) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 14759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14759, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None) != (0)) __PYX_ERR(0, 14759, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14758 * else: * m_global._allocated[int(callback)] = cbData * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], callback) */ } /* "cuda/bindings/runtime.pyx":14760 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], callback) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 14760, 
__pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_callback); __Pyx_GIVEREF((PyObject *)__pyx_v_callback); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_callback)) != (0)) __PYX_ERR(0, 14760, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14690 * cbData.callback(info, cbData.userData, handle) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceRegisterAsyncNotification(int device, callbackFunc, userData): * """ Registers a callback function to receive async notifications. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceRegisterAsyncNotification", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pcallbackFunc); __Pyx_XDECREF((PyObject *)__pyx_v_cyuserData); __Pyx_XDECREF((PyObject *)__pyx_v_callback); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14762 * return (_dict_cudaError_t[err], callback) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceUnregisterAsyncNotification(int device, callback): * """ Unregisters an async notification callback. 
*/ /* NOTE(review): machine-generated by Cython from cuda/bindings/runtime.pyx -- fix issues in the .pyx and regenerate; do not hand-edit this file. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_35cudaDeviceUnregisterAsyncNotification(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_34cudaDeviceUnregisterAsyncNotification, "cudaDeviceUnregisterAsyncNotification(int device, callback)\n\nUnregisters an async notification callback.\n\nUnregisters `callback` so that the corresponding callback function will\nstop receiving async notifications.\n\nParameters\n----------\ndevice : int\n The device from which to remove `callback`.\ncallback : :py:obj:`~.cudaAsyncCallbackHandle_t`\n The callback instance to unregister from receiving async\n notifications.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess` :py:obj:`~.cudaErrorNotSupported` :py:obj:`~.cudaErrorInvalidDevice` :py:obj:`~.cudaErrorInvalidValue` :py:obj:`~.cudaErrorNotPermitted` :py:obj:`~.cudaErrorUnknown`\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceRegisterAsyncNotification`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_35cudaDeviceUnregisterAsyncNotification = {"cudaDeviceUnregisterAsyncNotification", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_35cudaDeviceUnregisterAsyncNotification, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_34cudaDeviceUnregisterAsyncNotification}; /* Argument-parsing wrapper: unpacks exactly two arguments (device, callback) from positional and/or keyword form, converts `device` to int, and delegates to the __pyx_pf_ implementation; on any parse/convert error it decrefs collected values, records a traceback, and returns NULL. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_35cudaDeviceUnregisterAsyncNotification(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_device; PyObject *__pyx_v_callback = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int
__pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceUnregisterAsyncNotification (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_callback,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; /* NOTE(review): unlikely() wraps the value, not the comparison, below -- semantically identical (__builtin_expect returns its argument), only the branch hint is odd. */ if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14762, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14762, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14762, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceUnregisterAsyncNotification", 0) < (0)) __PYX_ERR(0, 14762, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceUnregisterAsyncNotification", 1, 2, 2, i); __PYX_ERR(0, 14762, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14762, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 14762,
__pyx_L3_error) } __pyx_v_device = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14763, __pyx_L3_error) __pyx_v_callback = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceUnregisterAsyncNotification", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 14762, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceUnregisterAsyncNotification", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_34cudaDeviceUnregisterAsyncNotification(__pyx_self, __pyx_v_device, __pyx_v_callback); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: coerces `callback` (None, cudaAsyncCallbackHandle_t, or anything convertible via cudaAsyncCallbackHandle_t(...)) to a Python int, reinterprets it as a raw cudaAsyncCallbackHandle_t pointer, releases the GIL around the cyruntime call, and on cudaSuccess frees the cbData pointer that the matching Register call stored in m_global._allocated and erases that entry. Returns a 1-tuple (_dict_cudaError_t[err],). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_34cudaDeviceUnregisterAsyncNotification(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_device, PyObject *__pyx_v_callback) { cudaAsyncCallbackHandle_t __pyx_v_cycallback; PyObject *__pyx_v_pcallback = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceUnregisterAsyncNotification", 0); /* "cuda/bindings/runtime.pyx":14787 * """ * cdef cyruntime.cudaAsyncCallbackHandle_t cycallback * if callback is None: # <<<<<<<<<<<<<< * pcallback = 0 * elif isinstance(callback,
(cudaAsyncCallbackHandle_t,)): */ __pyx_t_1 = (__pyx_v_callback == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14788 * cdef cyruntime.cudaAsyncCallbackHandle_t cycallback * if callback is None: * pcallback = 0 # <<<<<<<<<<<<<< * elif isinstance(callback, (cudaAsyncCallbackHandle_t,)): * pcallback = int(callback) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pcallback = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":14787 * """ * cdef cyruntime.cudaAsyncCallbackHandle_t cycallback * if callback is None: # <<<<<<<<<<<<<< * pcallback = 0 * elif isinstance(callback, (cudaAsyncCallbackHandle_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":14789 * if callback is None: * pcallback = 0 * elif isinstance(callback, (cudaAsyncCallbackHandle_t,)): # <<<<<<<<<<<<<< * pcallback = int(callback) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_callback, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAsyncCallbackHandle_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14790 * pcallback = 0 * elif isinstance(callback, (cudaAsyncCallbackHandle_t,)): * pcallback = int(callback) # <<<<<<<<<<<<<< * else: * pcallback = int(cudaAsyncCallbackHandle_t(callback)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_callback); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14790, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pcallback = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":14789 * if callback is None: * pcallback = 0 * elif isinstance(callback, (cudaAsyncCallbackHandle_t,)): # <<<<<<<<<<<<<< * pcallback = int(callback) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":14792 * pcallback = int(callback) * else: * pcallback = int(cudaAsyncCallbackHandle_t(callback)) # <<<<<<<<<<<<<< * cycallback = pcallback * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAsyncCallbackHandle_t); __pyx_t_4 = ((PyObject
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaAsyncCallbackHandle_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_callback}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14792, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pcallback = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":14793 * else: * pcallback = int(cudaAsyncCallbackHandle_t(callback)) * cycallback = pcallback # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceUnregisterAsyncNotification(device, cycallback) */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pcallback); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 14793, __pyx_L1_error) __pyx_v_cycallback = ((cudaAsyncCallbackHandle_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":14794 * pcallback = int(cudaAsyncCallbackHandle_t(callback)) * cycallback = pcallback * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceUnregisterAsyncNotification(device, cycallback) * if err == cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14795 * cycallback = pcallback * with nogil: * err = cyruntime.cudaDeviceUnregisterAsyncNotification(device, cycallback) # <<<<<<<<<<<<<< * if err == cyruntime.cudaSuccess: * free(m_global._allocated[pcallback]) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceUnregisterAsyncNotification(__pyx_v_device,
__pyx_v_cycallback); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14795, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":14794 * pcallback = int(cudaAsyncCallbackHandle_t(callback)) * cycallback = pcallback * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceUnregisterAsyncNotification(device, cycallback) * if err == cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":14796 * with nogil: * err = cyruntime.cudaDeviceUnregisterAsyncNotification(device, cycallback) * if err == cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * free(m_global._allocated[pcallback]) * m_global._allocated.erase(pcallback) */ __pyx_t_1 = (__pyx_v_err == cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":14797 * err = cyruntime.cudaDeviceUnregisterAsyncNotification(device, cycallback) * if err == cyruntime.cudaSuccess: * free(m_global._allocated[pcallback]) # <<<<<<<<<<<<<< * m_global._allocated.erase(pcallback) * return (_dict_cudaError_t[err],) */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pcallback); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 14797, __pyx_L1_error) /* NOTE(review): assumes _allocated[] yields NULL for a handle that was never registered (free(NULL) is then a no-op) -- confirm the container's operator[] semantics. */ free((__pyx_v_4cuda_8bindings_7runtime_m_global->_allocated[__pyx_t_6])); /* "cuda/bindings/runtime.pyx":14798 * if err == cyruntime.cudaSuccess: * free(m_global._allocated[pcallback]) * m_global._allocated.erase(pcallback) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pcallback); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 14798, __pyx_L1_error)
(void)(__pyx_v_4cuda_8bindings_7runtime_m_global->_allocated.erase(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6))); /* "cuda/bindings/runtime.pyx":14796 * with nogil: * err = cyruntime.cudaDeviceUnregisterAsyncNotification(device, cycallback) * if err == cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * free(m_global._allocated[pcallback]) * m_global._allocated.erase(pcallback) */ } /* "cuda/bindings/runtime.pyx":14799 * free(m_global._allocated[pcallback]) * m_global._allocated.erase(pcallback) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14799, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14762 * return (_dict_cudaError_t[err], callback) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceUnregisterAsyncNotification(int device, callback): * """ Unregisters an async notification callback.
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceUnregisterAsyncNotification", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pcallback); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14801 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetSharedMemConfig(): * """ Returns the shared memory configuration for the current device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_37cudaDeviceGetSharedMemConfig(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_36cudaDeviceGetSharedMemConfig, "cudaDeviceGetSharedMemConfig()\n\nReturns the shared memory configuration for the current device.\n\n[Deprecated]\n\nThis function will return in `pConfig` the current size of shared\nmemory banks on the current device. On devices with configurable shared\nmemory banks, :py:obj:`~.cudaDeviceSetSharedMemConfig` can be used to\nchange this setting, so that all subsequent kernel launches will by\ndefault use the new bank size. 
When\n:py:obj:`~.cudaDeviceGetSharedMemConfig` is called on devices without\nconfigurable shared memory, it will return the fixed bank size of the\nhardware.\n\nThe returned bank configurations can be either:\n\n- :py:obj:`~.cudaSharedMemBankSizeFourByte` - shared memory bank width\n is four bytes.\n\n- :py:obj:`~.cudaSharedMemBankSizeEightByte` - shared memory bank width\n is eight bytes.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npConfig : :py:obj:`~.cudaSharedMemConfig`\n Returned cache configuration\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaDeviceSetSharedMemConfig`, :py:obj:`~.cudaFuncSetCacheConfig`, :py:obj:`~.cuCtxGetSharedMemConfig`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_37cudaDeviceGetSharedMemConfig = {"cudaDeviceGetSharedMemConfig", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_37cudaDeviceGetSharedMemConfig, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_36cudaDeviceGetSharedMemConfig}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_37cudaDeviceGetSharedMemConfig(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetSharedMemConfig (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_36cudaDeviceGetSharedMemConfig(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_36cudaDeviceGetSharedMemConfig(CYTHON_UNUSED PyObject *__pyx_self) { enum cudaSharedMemConfig __pyx_v_pConfig; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject 
*__pyx_t_7 = NULL; size_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetSharedMemConfig", 0); /* "cuda/bindings/runtime.pyx":14836 * """ * cdef cyruntime.cudaSharedMemConfig pConfig * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetSharedMemConfig(&pConfig) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14837 * cdef cyruntime.cudaSharedMemConfig pConfig * with nogil: * err = cyruntime.cudaDeviceGetSharedMemConfig(&pConfig) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetSharedMemConfig((&__pyx_v_pConfig)); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14837, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":14836 * """ * cdef cyruntime.cudaSharedMemConfig pConfig * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetSharedMemConfig(&pConfig) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14838 * with nogil: * err = cyruntime.cudaDeviceGetSharedMemConfig(&pConfig) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaSharedMemConfig(pConfig)) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":14839 * err = cyruntime.cudaDeviceGetSharedMemConfig(&pConfig) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], cudaSharedMemConfig(pConfig)) * */ 
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 14839, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 14839, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14838 * with nogil: * err = cyruntime.cudaDeviceGetSharedMemConfig(&pConfig) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaSharedMemConfig(pConfig)) */ } /* "cuda/bindings/runtime.pyx":14840 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaSharedMemConfig(pConfig)) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14840, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_4 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_mstate_global->__pyx_n_u_cudaSharedMemConfig); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyLong_From_enum__cudaSharedMemConfig(__pyx_v_pConfig); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 14840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_6))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_6); assert(__pyx_t_4); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_6, __pyx__function); __pyx_t_8 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_7}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+__pyx_t_8, (2-__pyx_t_8) | (__pyx_t_8*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 14840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 14840, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 14840, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14801 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetSharedMemConfig(): * """ Returns the shared memory configuration for the current device. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetSharedMemConfig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14842 * return (_dict_cudaError_t[err], cudaSharedMemConfig(pConfig)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSetSharedMemConfig(config not None : cudaSharedMemConfig): * """ Sets the shared memory configuration for the current device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_39cudaDeviceSetSharedMemConfig(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_38cudaDeviceSetSharedMemConfig, "cudaDeviceSetSharedMemConfig(config: cudaSharedMemConfig)\n\nSets the shared memory configuration for the current device.\n\n[Deprecated]\n\nOn devices with configurable shared memory banks, this function will\nset the shared memory bank size which is used for all subsequent kernel\nlaunches. Any per-function setting of shared memory set via\n:py:obj:`~.cudaFuncSetSharedMemConfig` will override the device wide\nsetting.\n\nChanging the shared memory configuration between launches may introduce\na device side synchronization point.\n\nChanging the shared memory bank size will not increase shared memory\nusage or affect occupancy of kernels, but may have major effects on\nperformance. 
Larger bank sizes will allow for greater potential\nbandwidth to shared memory, but will change what kinds of accesses to\nshared memory will result in bank conflicts.\n\nThis function will do nothing on devices with fixed shared memory bank\nsize.\n\nThe supported bank configurations are:\n\n- :py:obj:`~.cudaSharedMemBankSizeDefault`: set bank width the device\n default (currently, four bytes)\n\n- :py:obj:`~.cudaSharedMemBankSizeFourByte`: set shared memory bank\n width to be four bytes natively.\n\n- :py:obj:`~.cudaSharedMemBankSizeEightByte`: set shared memory bank\n width to be eight bytes natively.\n\nParameters\n----------\nconfig : :py:obj:`~.cudaSharedMemConfig`\n Requested cache configuration\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaDeviceGetSharedMemConfig`, :py:obj:`~.cudaFuncSetCacheConfig`, :py:obj:`~.cuCtxSetSharedMemConfig`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_39cudaDeviceSetSharedMemConfig = {"cudaDeviceSetSharedMemConfig", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_39cudaDeviceSetSharedMemConfig, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_38cudaDeviceSetSharedMemConfig}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_39cudaDeviceSetSharedMemConfig(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_config = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("cudaDeviceSetSharedMemConfig (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_config,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14842, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14842, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceSetSharedMemConfig", 0) < (0)) __PYX_ERR(0, 14842, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceSetSharedMemConfig", 1, 1, 1, i); __PYX_ERR(0, 14842, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14842, __pyx_L3_error) } __pyx_v_config = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceSetSharedMemConfig", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14842, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceSetSharedMemConfig", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_config) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "config"); __PYX_ERR(0, 14843, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_38cudaDeviceSetSharedMemConfig(__pyx_self, __pyx_v_config); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_38cudaDeviceSetSharedMemConfig(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_config) { enum cudaSharedMemConfig __pyx_v_cyconfig; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaSharedMemConfig __pyx_t_2; cudaError_t __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceSetSharedMemConfig", 0); /* "cuda/bindings/runtime.pyx":14891 * :py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaDeviceGetSharedMemConfig`, :py:obj:`~.cudaFuncSetCacheConfig`, :py:obj:`~.cuCtxSetSharedMemConfig` * """ * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceSetSharedMemConfig(cyconfig) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_config, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = 
((enum cudaSharedMemConfig)__Pyx_PyLong_As_enum__cudaSharedMemConfig(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 14891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyconfig = __pyx_t_2; /* "cuda/bindings/runtime.pyx":14892 * """ * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSetSharedMemConfig(cyconfig) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14893 * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value * with nogil: * err = cyruntime.cudaDeviceSetSharedMemConfig(cyconfig) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceSetSharedMemConfig(__pyx_v_cyconfig); if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14893, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":14892 * """ * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSetSharedMemConfig(cyconfig) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14894 * with nogil: * err = cyruntime.cudaDeviceSetSharedMemConfig(cyconfig) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
__pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 14894, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14842 * return (_dict_cudaError_t[err], cudaSharedMemConfig(pConfig)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSetSharedMemConfig(config not None : cudaSharedMemConfig): * """ Sets the shared memory configuration for the current device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceSetSharedMemConfig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14896 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetLastError(): * """ Returns the last error from a runtime call. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_41cudaGetLastError(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_40cudaGetLastError, "cudaGetLastError()\n\nReturns the last error from a runtime call.\n\nReturns the last error that has been produced by any of the runtime\ncalls in the same instance of the CUDA Runtime library in the host\nthread and resets it to :py:obj:`~.cudaSuccess`.\n\nNote: Multiple instances of the CUDA Runtime library can be present in\nan application when using a library that statically links the CUDA\nRuntime.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorMissingConfiguration`, :py:obj:`~.cudaErrorMemoryAllocation`, :py:obj:`~.cudaErrorInitializationError`, :py:obj:`~.cudaErrorLaunchFailure`, :py:obj:`~.cudaErrorLaunchTimeout`, :py:obj:`~.cudaErrorLaunchOutOfResources`, :py:obj:`~.cudaErrorInvalidDeviceFunction`, :py:obj:`~.cudaErrorInvalidConfiguration`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidSymbol`, :py:obj:`~.cudaErrorUnmapBufferObjectFailed`, :py:obj:`~.cudaErrorInvalidDevicePointer`, :py:obj:`~.cudaErrorInvalidTexture`, :py:obj:`~.cudaErrorInvalidTextureBinding`, :py:obj:`~.cudaErrorInvalidChannelDescriptor`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`, :py:obj:`~.cudaErrorInvalidFilterSetting`, :py:obj:`~.cudaErrorInvalidNormSetting`, :py:obj:`~.cudaErrorUnknown`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorInsufficientDriver`, :py:obj:`~.cudaErrorNoDevice`, :py:obj:`~.cudaErrorSetOnActiveProcess`, :py:obj:`~.cudaErrorStartupFailure`, :py:obj:`~.cudaErrorInvalidPtx`, :py:obj:`~.cudaErrorUnsupportedPtxVersion`, :py:obj:`~.cudaErrorNoKernelImageForDevice`, :py:obj:`~.cudaErrorJitCompilerNotFound`, :py:obj:`~.cudaErrorJitCompilationDisabled`\n\nSee 
Also\n--------\n:py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaError`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_41cudaGetLastError = {"cudaGetLastError", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_41cudaGetLastError, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_40cudaGetLastError}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_41cudaGetLastError(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetLastError (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_40cudaGetLastError(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_40cudaGetLastError(CYTHON_UNUSED PyObject *__pyx_self) { cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetLastError", 0); /* "cuda/bindings/runtime.pyx":14917 * :py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaError` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetLastError() * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14918 * """ * with nogil: * err = cyruntime.cudaGetLastError() # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetLastError(); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14918, 
__pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":14917 * :py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaError` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetLastError() * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14919 * with nogil: * err = cyruntime.cudaGetLastError() * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 14919, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14896 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetLastError(): * """ Returns the last error from a runtime call. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetLastError", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14921 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaPeekAtLastError(): * """ Returns the last error from a runtime call. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_43cudaPeekAtLastError(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_42cudaPeekAtLastError, "cudaPeekAtLastError()\n\nReturns the last error from a runtime call.\n\nReturns the last error that has been produced by any of the runtime\ncalls in the same instance of the CUDA Runtime library in the host\nthread. This call does not reset the error to :py:obj:`~.cudaSuccess`\nlike :py:obj:`~.cudaGetLastError()`.\n\nNote: Multiple instances of the CUDA Runtime library can be present in\nan application when using a library that statically links the CUDA\nRuntime.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorMissingConfiguration`, :py:obj:`~.cudaErrorMemoryAllocation`, :py:obj:`~.cudaErrorInitializationError`, :py:obj:`~.cudaErrorLaunchFailure`, :py:obj:`~.cudaErrorLaunchTimeout`, :py:obj:`~.cudaErrorLaunchOutOfResources`, :py:obj:`~.cudaErrorInvalidDeviceFunction`, :py:obj:`~.cudaErrorInvalidConfiguration`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidSymbol`, :py:obj:`~.cudaErrorUnmapBufferObjectFailed`, :py:obj:`~.cudaErrorInvalidDevicePointer`, :py:obj:`~.cudaErrorInvalidTexture`, :py:obj:`~.cudaErrorInvalidTextureBinding`, :py:obj:`~.cudaErrorInvalidChannelDescriptor`, 
:py:obj:`~.cudaErrorInvalidMemcpyDirection`, :py:obj:`~.cudaErrorInvalidFilterSetting`, :py:obj:`~.cudaErrorInvalidNormSetting`, :py:obj:`~.cudaErrorUnknown`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorInsufficientDriver`, :py:obj:`~.cudaErrorNoDevice`, :py:obj:`~.cudaErrorSetOnActiveProcess`, :py:obj:`~.cudaErrorStartupFailure`, :py:obj:`~.cudaErrorInvalidPtx`, :py:obj:`~.cudaErrorUnsupportedPtxVersion`, :py:obj:`~.cudaErrorNoKernelImageForDevice`, :py:obj:`~.cudaErrorJitCompilerNotFound`, :py:obj:`~.cudaErrorJitCompilationDisabled`\n\nSee Also\n--------\n:py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaError`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_43cudaPeekAtLastError = {"cudaPeekAtLastError", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_43cudaPeekAtLastError, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_42cudaPeekAtLastError}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_43cudaPeekAtLastError(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaPeekAtLastError (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_42cudaPeekAtLastError(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_42cudaPeekAtLastError(CYTHON_UNUSED PyObject *__pyx_self) { cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaPeekAtLastError", 0); /* "cuda/bindings/runtime.pyx":14943 * :py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaGetErrorName`, 
:py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaError` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaPeekAtLastError() * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14944 * """ * with nogil: * err = cyruntime.cudaPeekAtLastError() # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaPeekAtLastError(); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14944, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":14943 * :py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaError` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaPeekAtLastError() * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14945 * with nogil: * err = cyruntime.cudaPeekAtLastError() * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 14945, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14921 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaPeekAtLastError(): * """ Returns the last error from a runtime call. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaPeekAtLastError", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14947 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetErrorName(error not None : cudaError_t): * """ Returns the string representation of an error code enum name. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_45cudaGetErrorName(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_44cudaGetErrorName, "cudaGetErrorName(error: cudaError_t)\n\nReturns the string representation of an error code enum name.\n\nReturns a string containing the name of an error code in the enum. 
/* --- cudaGetErrorName (Cython-generated from cuda/bindings/runtime.pyx) ----
 * Do not hand-edit: regenerate from the .pyx instead.  This span holds the
 * tail of the PyDoc_STRVAR docstring (a C string literal — must stay
 * byte-identical), the PyMethodDef entry (METH_FASTCALL|METH_KEYWORDS), the
 * argument-parsing wrapper __pyx_pw_...45... (exactly one required 'error'
 * argument, positional or keyword; a None argument raises TypeError before
 * the impl is called), and the impl __pyx_pf_...44..., which reads
 * error.value, converts it to enum cudaError, calls
 * cyruntime.cudaGetErrorName with the GIL released (Py_UNBLOCK_THREADS /
 * Py_BLOCK_THREADS), and returns the 2-tuple
 * (cudaError_t.cudaSuccess, <bytes of the returned const char*>).
 * NOTE(review): the NULL-return + __Pyx_ErrOccurredWithGIL() check is
 * Cython's exception-value convention for the cimported cy function. */
If\nthe error code is not recognized, \"unrecognized error code\" is\nreturned.\n\nParameters\n----------\nerror : :py:obj:`~.cudaError_t`\n Error code to convert to string\n\nReturns\n-------\ncudaError_t.cudaSuccess\n cudaError_t.cudaSuccess\nbytes\n `char*` pointer to a NULL-terminated string\n\nSee Also\n--------\n:py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaError`, :py:obj:`~.cuGetErrorName`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_45cudaGetErrorName = {"cudaGetErrorName", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_45cudaGetErrorName, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_44cudaGetErrorName}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_45cudaGetErrorName(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_error = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetErrorName (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_error,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14947, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14947, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGetErrorName", 0) < (0)) __PYX_ERR(0, 14947, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGetErrorName", 1, 1, 1, i); __PYX_ERR(0, 14947, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14947, __pyx_L3_error) } __pyx_v_error = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGetErrorName", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14947, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetErrorName", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_error) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "error"); __PYX_ERR(0, 14948, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_44cudaGetErrorName(__pyx_self, __pyx_v_error); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_44cudaGetErrorName(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_error) { cudaError_t __pyx_v_cyerror; char const *__pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; cudaError_t __pyx_t_2; char const *__pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetErrorName", 0); /* "cuda/bindings/runtime.pyx":14971 * :py:obj:`~.cudaGetErrorString`, :py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaError`, :py:obj:`~.cuGetErrorName` * """ * cdef cyruntime.cudaError_t cyerror = error.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetErrorName(cyerror) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_error, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14971, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaError)__Pyx_PyLong_As_enum__cudaError(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 14971, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyerror = __pyx_t_2; /* "cuda/bindings/runtime.pyx":14972 * """ * cdef cyruntime.cudaError_t cyerror = error.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetErrorName(cyerror) * return (cudaError_t.cudaSuccess, err) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":14973 * cdef cyruntime.cudaError_t cyerror = error.value * with nogil: * err = 
cyruntime.cudaGetErrorName(cyerror) # <<<<<<<<<<<<<< * return (cudaError_t.cudaSuccess, err) * */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetErrorName(__pyx_v_cyerror); if (unlikely(__pyx_t_3 == ((char const *)0) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 14973, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":14972 * """ * cdef cyruntime.cudaError_t cyerror = error.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetErrorName(cyerror) * return (cudaError_t.cudaSuccess, err) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":14974 * with nogil: * err = cyruntime.cudaGetErrorName(cyerror) * return (cudaError_t.cudaSuccess, err) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_cudaSuccess); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_err); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 14974, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 14974, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(0, 14974, __pyx_L1_error); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14947 * return (_dict_cudaError_t[err],) * * 
@cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetErrorName(error not None : cudaError_t): * """ Returns the string representation of an error code enum name. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetErrorName", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":14976 * return (cudaError_t.cudaSuccess, err) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetErrorString(error not None : cudaError_t): * """ Returns the description string for an error code. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_47cudaGetErrorString(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_46cudaGetErrorString, "cudaGetErrorString(error: cudaError_t)\n\nReturns the description string for an error code.\n\nReturns the description string for an error code. 
/* --- cudaGetErrorString (Cython-generated) ---------------------------------
 * Mirrors cudaGetErrorName above: PyDoc_STRVAR tail (runtime string literal,
 * kept byte-identical), PyMethodDef (METH_FASTCALL|METH_KEYWORDS), wrapper
 * __pyx_pw_...47... parsing the single required 'error' argument and
 * rejecting None with TypeError, and impl __pyx_pf_...46..., which converts
 * error.value to enum cudaError, calls cyruntime.cudaGetErrorString with the
 * GIL released, and returns (cudaError_t.cudaSuccess, <bytes>).
 * Auto-generated from cuda/bindings/runtime.pyx — do not hand-edit. */
If the error code is\nnot recognized, \"unrecognized error code\" is returned.\n\nParameters\n----------\nerror : :py:obj:`~.cudaError_t`\n Error code to convert to string\n\nReturns\n-------\ncudaError_t.cudaSuccess\n cudaError_t.cudaSuccess\nbytes\n `char*` pointer to a NULL-terminated string\n\nSee Also\n--------\n:py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaError`, :py:obj:`~.cuGetErrorString`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_47cudaGetErrorString = {"cudaGetErrorString", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_47cudaGetErrorString, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_46cudaGetErrorString}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_47cudaGetErrorString(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_error = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetErrorString (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_error,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 14976, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14976, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGetErrorString", 0) < (0)) __PYX_ERR(0, 14976, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGetErrorString", 1, 1, 1, i); __PYX_ERR(0, 14976, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 14976, __pyx_L3_error) } __pyx_v_error = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGetErrorString", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 14976, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetErrorString", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_error) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "error"); __PYX_ERR(0, 14977, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_46cudaGetErrorString(__pyx_self, __pyx_v_error); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_46cudaGetErrorString(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_error) { cudaError_t __pyx_v_cyerror; char const *__pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; cudaError_t __pyx_t_2; char const *__pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetErrorString", 0); /* "cuda/bindings/runtime.pyx":14999 * :py:obj:`~.cudaGetErrorName`, :py:obj:`~.cudaGetLastError`, :py:obj:`~.cudaPeekAtLastError`, :py:obj:`~.cudaError`, :py:obj:`~.cuGetErrorString` * """ * cdef cyruntime.cudaError_t cyerror = error.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetErrorString(cyerror) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_error, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14999, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaError)__Pyx_PyLong_As_enum__cudaError(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 14999, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyerror = __pyx_t_2; /* "cuda/bindings/runtime.pyx":15000 * """ * cdef cyruntime.cudaError_t cyerror = error.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetErrorString(cyerror) * return (cudaError_t.cudaSuccess, err) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":15001 * cdef cyruntime.cudaError_t cyerror = error.value * with nogil: * err = 
cyruntime.cudaGetErrorString(cyerror) # <<<<<<<<<<<<<< * return (cudaError_t.cudaSuccess, err) * */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetErrorString(__pyx_v_cyerror); if (unlikely(__pyx_t_3 == ((char const *)0) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15001, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":15000 * """ * cdef cyruntime.cudaError_t cyerror = error.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetErrorString(cyerror) * return (cudaError_t.cudaSuccess, err) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":15002 * with nogil: * err = cyruntime.cudaGetErrorString(cyerror) * return (cudaError_t.cudaSuccess, err) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15002, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_cudaSuccess); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15002, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_err); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15002, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15002, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 15002, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1) != (0)) __PYX_ERR(0, 15002, __pyx_L1_error); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":14976 * return (cudaError_t.cudaSuccess, err) * * 
/* --- end of cudaGetErrorString impl + cudaGetDeviceCount (Cython-generated)
 * cudaGetDeviceCount takes no arguments (METH_NOARGS wrapper
 * __pyx_pw_...49...; its __Pyx_KwValues_VARARGS use compiles because the
 * macro ignores its arguments in the NOARGS configuration — generated code,
 * not a scope bug).  The impl __pyx_pf_...48... calls
 * cyruntime.cudaGetDeviceCount(&count) with the GIL released and returns
 * (_dict_cudaError_t[err], None) on failure or (_dict_cudaError_t[err],
 * count) on success.  The cudaErrorCallRequiresNewerDriver comparison is
 * Cython's exception-sentinel check for the cimported function.
 * Auto-generated from cuda/bindings/runtime.pyx — do not hand-edit. */
@cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetErrorString(error not None : cudaError_t): * """ Returns the description string for an error code. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetErrorString", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":15004 * return (cudaError_t.cudaSuccess, err) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetDeviceCount(): * """ Returns the number of compute-capable devices. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_49cudaGetDeviceCount(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_48cudaGetDeviceCount, "cudaGetDeviceCount()\n\nReturns the number of compute-capable devices.\n\nReturns in `*count` the number of devices with compute capability\ngreater or equal to 2.0 that are available for execution.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`\ncount : int\n Returns the number of devices with compute capability greater or\n equal to 2.0\n\nSee Also\n--------\n:py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuDeviceGetCount`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_49cudaGetDeviceCount = {"cudaGetDeviceCount", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_49cudaGetDeviceCount, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_48cudaGetDeviceCount}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_49cudaGetDeviceCount(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("cudaGetDeviceCount (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_48cudaGetDeviceCount(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_48cudaGetDeviceCount(CYTHON_UNUSED PyObject *__pyx_self) { int __pyx_v_count; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetDeviceCount", 0); /* "cuda/bindings/runtime.pyx":15023 * :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuDeviceGetCount` * """ * cdef int count = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetDeviceCount(&count) */ __pyx_v_count = 0; /* "cuda/bindings/runtime.pyx":15024 * """ * cdef int count = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetDeviceCount(&count) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":15025 * cdef int count = 0 * with nogil: * err = cyruntime.cudaGetDeviceCount(&count) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetDeviceCount((&__pyx_v_count)); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15025, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":15024 * """ * cdef int count = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetDeviceCount(&count) * if err != cyruntime.cudaSuccess: 
*/ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":15026 * with nogil: * err = cyruntime.cudaGetDeviceCount(&count) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], count) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":15027 * err = cyruntime.cudaGetDeviceCount(&count) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], count) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15027, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 15027, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 15027, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15026 * with nogil: * err = cyruntime.cudaGetDeviceCount(&count) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], count) */ } /* "cuda/bindings/runtime.pyx":15028 * if err 
!= cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], count) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_int(__pyx_v_count); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 15028, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 15028, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15004 * return (cudaError_t.cudaSuccess, err) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetDeviceCount(): * """ Returns the number of compute-capable devices. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetDeviceCount", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":15030 * return (_dict_cudaError_t[err], count) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetDeviceProperties(int device): * """ Returns information about the compute-device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_51cudaGetDeviceProperties(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_50cudaGetDeviceProperties, "cudaGetDeviceProperties(int device)\n\nReturns information about the compute-device.\n\nReturns in `*prop` the properties of device `dev`. 
/* Continuation and end of the PyDoc_STRVAR docstring for
 * cudaGetDeviceProperties (one large C string literal exposed to Python as
 * __doc__ — it is runtime-visible text and must stay byte-identical), plus
 * the opening of the PyMethodDef entry for the same function.
 * Auto-generated from cuda/bindings/runtime.pyx — do not hand-edit. */
The\n:py:obj:`~.cudaDeviceProp` structure is defined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere:\n\n- :py:obj:`~.name[256]` is an ASCII string identifying the device.\n\n- :py:obj:`~.uuid` is a 16-byte unique identifier.\n\n- :py:obj:`~.totalGlobalMem` is the total amount of global memory\n available on the device in bytes.\n\n- :py:obj:`~.sharedMemPerBlock` is the maximum amount of shared memory\n available to a thread block in bytes.\n\n- :py:obj:`~.regsPerBlock` is the maximum number of 32-bit registers\n available to a thread block.\n\n- :py:obj:`~.warpSize` is the warp size in threads.\n\n- :py:obj:`~.memPitch` is the maximum pitch in bytes allowed by the\n memory copy functions that involve memory regions allocated through\n :py:obj:`~.cudaMallocPitch()`.\n\n- :py:obj:`~.maxThreadsPerBlock` is the maximum number of threads per\n block.\n\n- :py:obj:`~.maxThreadsDim[3]` contains the maximum size of each\n dimension of a block.\n\n- :py:obj:`~.maxGridSize[3]` contains the maximum size of each\n dimension of a grid.\n\n- :py:obj:`~.clockRate` is the clock frequency in kilohertz.\n\n- :py:obj:`~.totalConstMem` is the total amount of constant memory\n available on the device in bytes.\n\n- :py:obj:`~.major`, :py:obj:`~.minor` are the major and minor revision\n numbers defining the device's compute capability.\n\n- :py:obj:`~.textureAlignment` is the alignment requirement; texture\n base addresses that are aligned to :py:obj:`~.textureAlignment` bytes\n do not need an offset applied to texture fetches.\n\n- :py:obj:`~.texturePitchAlignment` is the pitch alignment requirement\n for 2D texture references that are bound to pitched memory.\n\n- :py:obj:`~.deviceOverlap` is 1 if the device can concurrently copy\n memory between host and device while executing a kernel, or 0 if not.\n Depr""ecated, use instead asyncEngineCount.\n\n- :py:obj:`~.multiProcessorCount` is the number of multiprocessors on\n the device.\n\n- 
:py:obj:`~.kernelExecTimeoutEnabled` is 1 if there is a run time\n limit for kernels executed on the device, or 0 if not.\n\n- :py:obj:`~.integrated` is 1 if the device is an integrated\n (motherboard) GPU and 0 if it is a discrete (card) component.\n\n- :py:obj:`~.canMapHostMemory` is 1 if the device can map host memory\n into the CUDA address space for use with\n :py:obj:`~.cudaHostAlloc()`/:py:obj:`~.cudaHostGetDevicePointer()`,\n or 0 if not.\n\n- :py:obj:`~.computeMode` is the compute mode that the device is\n currently in. Available modes are as follows:\n\n - cudaComputeModeDefault: Default mode - Device is not restricted and\n multiple threads can use :py:obj:`~.cudaSetDevice()` with this\n device.\n\n - cudaComputeModeProhibited: Compute-prohibited mode - No threads can\n use :py:obj:`~.cudaSetDevice()` with this device.\n\n - cudaComputeModeExclusiveProcess: Compute-exclusive-process mode -\n Many threads in one process will be able to use\n :py:obj:`~.cudaSetDevice()` with this device. 
When an occupied\n exclusive mode device is chosen with :py:obj:`~.cudaSetDevice`, all\n subsequent non-device management runtime functions will return\n :py:obj:`~.cudaErrorDevicesUnavailable`.\n\n- :py:obj:`~.maxTexture1D` is the maximum 1D texture size.\n\n- :py:obj:`~.maxTexture1DMipmap` is the maximum 1D mipmapped texture\n texture size.\n\n- :py:obj:`~.maxTexture1DLinear` is the maximum 1D texture size for\n textures bound to linear memory.\n\n- :py:obj:`~.maxTexture2D[2]` contains the maximum 2D texture\n dimensions.\n\n- :py:obj:`~.maxTexture2DMipmap[2]` contains the maximum 2D mipmapped\n texture dimensions.\n\n- :py:obj:`~.maxTexture2DLinear[3]` contains the maximum 2D texture\n dimensions for 2D textures bound to pitch linear memory.\n\n- :py:obj:`~.maxTexture2DGather[2]` contains the maximum 2D"" texture\n dimensions if texture gather operations have to be performed.\n\n- :py:obj:`~.maxTexture3D[3]` contains the maximum 3D texture\n dimensions.\n\n- :py:obj:`~.maxTexture3DAlt[3]` contains the maximum alternate 3D\n texture dimensions.\n\n- :py:obj:`~.maxTextureCubemap` is the maximum cubemap texture width or\n height.\n\n- :py:obj:`~.maxTexture1DLayered[2]` contains the maximum 1D layered\n texture dimensions.\n\n- :py:obj:`~.maxTexture2DLayered[3]` contains the maximum 2D layered\n texture dimensions.\n\n- :py:obj:`~.maxTextureCubemapLayered[2]` contains the maximum cubemap\n layered texture dimensions.\n\n- :py:obj:`~.maxSurface1D` is the maximum 1D surface size.\n\n- :py:obj:`~.maxSurface2D[2]` contains the maximum 2D surface\n dimensions.\n\n- :py:obj:`~.maxSurface3D[3]` contains the maximum 3D surface\n dimensions.\n\n- :py:obj:`~.maxSurface1DLayered[2]` contains the maximum 1D layered\n surface dimensions.\n\n- :py:obj:`~.maxSurface2DLayered[3]` contains the maximum 2D layered\n surface dimensions.\n\n- :py:obj:`~.maxSurfaceCubemap` is the maximum cubemap surface width or\n height.\n\n- :py:obj:`~.maxSurfaceCubemapLayered[2]` contains the maximum 
cubemap\n layered surface dimensions.\n\n- :py:obj:`~.surfaceAlignment` specifies the alignment requirements for\n surfaces.\n\n- :py:obj:`~.concurrentKernels` is 1 if the device supports executing\n multiple kernels within the same context simultaneously, or 0 if not.\n It is not guaranteed that multiple kernels will be resident on the\n device concurrently so this feature should not be relied upon for\n correctness.\n\n- :py:obj:`~.ECCEnabled` is 1 if the device has ECC support turned on,\n or 0 if not.\n\n- :py:obj:`~.pciBusID` is the PCI bus identifier of the device.\n\n- :py:obj:`~.pciDeviceID` is the PCI device (sometimes called slot)\n identifier of the device.\n\n- :py:obj:`~.pciDomainID` is the PCI domain identifier of the device.\n\n- :py:obj:`~.tccDriver` is 1 if the device is u""sing a TCC driver or 0\n if not.\n\n- :py:obj:`~.asyncEngineCount` is 1 when the device can concurrently\n copy memory between host and device while executing a kernel. It is 2\n when the device can concurrently copy memory between host and device\n in both directions and execute a kernel at the same time. 
It is 0 if\n neither of these is supported.\n\n- :py:obj:`~.unifiedAddressing` is 1 if the device shares a unified\n address space with the host and 0 otherwise.\n\n- :py:obj:`~.memoryClockRate` is the peak memory clock frequency in\n kilohertz.\n\n- :py:obj:`~.memoryBusWidth` is the memory bus width in bits.\n\n- :py:obj:`~.l2CacheSize` is L2 cache size in bytes.\n\n- :py:obj:`~.persistingL2CacheMaxSize` is L2 cache's maximum persisting\n lines size in bytes.\n\n- :py:obj:`~.maxThreadsPerMultiProcessor` is the number of maximum\n resident threads per multiprocessor.\n\n- :py:obj:`~.streamPrioritiesSupported` is 1 if the device supports\n stream priorities, or 0 if it is not supported.\n\n- :py:obj:`~.globalL1CacheSupported` is 1 if the device supports\n caching of globals in L1 cache, or 0 if it is not supported.\n\n- :py:obj:`~.localL1CacheSupported` is 1 if the device supports caching\n of locals in L1 cache, or 0 if it is not supported.\n\n- :py:obj:`~.sharedMemPerMultiprocessor` is the maximum amount of\n shared memory available to a multiprocessor in bytes; this amount is\n shared by all thread blocks simultaneously resident on a\n multiprocessor.\n\n- :py:obj:`~.regsPerMultiprocessor` is the maximum number of 32-bit\n registers available to a multiprocessor; this number is shared by all\n thread blocks simultaneously resident on a multiprocessor.\n\n- :py:obj:`~.managedMemory` is 1 if the device supports allocating\n managed memory on this system, or 0 if it is not supported.\n\n- :py:obj:`~.isMultiGpuBoard` is 1 if the device is on a multi-GPU\n board (e.g. Gemini cards), and 0 if not;\n\n- :py:obj:`~.multiGpuBoardGroupID` is a uniqu""e identifier for a group\n of devices associated with the same board. 
Devices on the same multi-\n GPU board will share the same identifier.\n\n- :py:obj:`~.hostNativeAtomicSupported` is 1 if the link between the\n device and the host supports native atomic operations, or 0 if it is\n not supported.\n\n- :py:obj:`~.singleToDoublePrecisionPerfRatio` is the ratio of single\n precision performance (in floating-point operations per second) to\n double precision performance.\n\n- :py:obj:`~.pageableMemoryAccess` is 1 if the device supports\n coherently accessing pageable memory without calling cudaHostRegister\n on it, and 0 otherwise.\n\n- :py:obj:`~.concurrentManagedAccess` is 1 if the device can coherently\n access managed memory concurrently with the CPU, and 0 otherwise.\n\n- :py:obj:`~.computePreemptionSupported` is 1 if the device supports\n Compute Preemption, and 0 otherwise.\n\n- :py:obj:`~.canUseHostPointerForRegisteredMem` is 1 if the device can\n access host registered memory at the same virtual address as the CPU,\n and 0 otherwise.\n\n- :py:obj:`~.cooperativeLaunch` is 1 if the device supports launching\n cooperative kernels via :py:obj:`~.cudaLaunchCooperativeKernel`, and\n 0 otherwise.\n\n- :py:obj:`~.cooperativeMultiDeviceLaunch` is 1 if the device supports\n launching cooperative kernels via\n :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice`, and 0 otherwise.\n\n- :py:obj:`~.sharedMemPerBlockOptin` is the per device maximum shared\n memory per block usable by special opt in\n\n- :py:obj:`~.pageableMemoryAccessUsesHostPageTables` is 1 if the device\n accesses pageable memory via the host's page tables, and 0 otherwise.\n\n- :py:obj:`~.directManagedMemAccessFromHost` is 1 if the host can\n directly access managed memory on the device without migration, and 0\n otherwise.\n\n- :py:obj:`~.maxBlocksPerMultiProcessor` is the maximum number of\n thread blocks that can reside on a multiprocessor.\n\n- :py:obj:`~.accessPolicyMaxWindowSi""ze` is the maximum value of\n :py:obj:`~.cudaAccessPolicyWindow.num_bytes`.\n\n- 
:py:obj:`~.reservedSharedMemPerBlock` is the shared memory reserved\n by CUDA driver per block in bytes\n\n- :py:obj:`~.hostRegisterSupported` is 1 if the device supports host\n memory registration via :py:obj:`~.cudaHostRegister`, and 0\n otherwise.\n\n- :py:obj:`~.sparseCudaArraySupported` is 1 if the device supports\n sparse CUDA arrays and sparse CUDA mipmapped arrays, 0 otherwise\n\n- :py:obj:`~.hostRegisterReadOnlySupported` is 1 if the device supports\n using the :py:obj:`~.cudaHostRegister` flag cudaHostRegisterReadOnly\n to register memory that must be mapped as read-only to the GPU\n\n- :py:obj:`~.timelineSemaphoreInteropSupported` is 1 if external\n timeline semaphore interop is supported on the device, 0 otherwise\n\n- :py:obj:`~.memoryPoolsSupported` is 1 if the device supports using\n the cudaMallocAsync and cudaMemPool family of APIs, 0 otherwise\n\n- :py:obj:`~.gpuDirectRDMASupported` is 1 if the device supports\n GPUDirect RDMA APIs, 0 otherwise\n\n- :py:obj:`~.gpuDirectRDMAFlushWritesOptions` is a bitmask to be\n interpreted according to the\n :py:obj:`~.cudaFlushGPUDirectRDMAWritesOptions` enum\n\n- :py:obj:`~.gpuDirectRDMAWritesOrdering` See the\n :py:obj:`~.cudaGPUDirectRDMAWritesOrdering` enum for numerical values\n\n- :py:obj:`~.memoryPoolSupportedHandleTypes` is a bitmask of handle\n types supported with mempool-based IPC\n\n- :py:obj:`~.deferredMappingCudaArraySupported` is 1 if the device\n supports deferred mapping CUDA arrays and CUDA mipmapped arrays\n\n- :py:obj:`~.ipcEventSupported` is 1 if the device supports IPC Events,\n and 0 otherwise\n\n- :py:obj:`~.unifiedFunctionPointers` is 1 if the device support\n unified pointers, and 0 otherwise\n\nParameters\n----------\ndevice : int\n None\n\nReturns\n-------\ncudaError_t\n\nprop : :py:obj:`~.cudaDeviceProp`\n None"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_51cudaGetDeviceProperties = {"cudaGetDeviceProperties", 
(PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_51cudaGetDeviceProperties, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_50cudaGetDeviceProperties};
/* NOTE(review): Cython-generated code. The authoritative source is
 * "cuda/bindings/runtime.pyx"; make changes there and regenerate rather than
 * hand-editing this C file.
 *
 * Python wrapper for cudaGetDeviceProperties(device): accepts exactly one
 * argument, positionally or as the keyword "device", converts it to a C int
 * and forwards to the __pyx_pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_51cudaGetDeviceProperties(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
int __pyx_v_device;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[1] = {0};
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cudaGetDeviceProperties (wrapper)", 0);
/* Tuple-based argument counting is only needed when METH_FASTCALL is
 * unavailable; the fastcall path receives __pyx_nargs directly. */
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
{
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
/* NOTE(review): generated oddity — `unlikely(__pyx_kwds_len) < 0` wraps only
 * the operand in the macro; behaviorally identical to
 * `unlikely(__pyx_kwds_len < 0)` because unlikely() yields its argument. */
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15030, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
/* Keyword path: collect up to 1 positional, then merge keyword arguments. */
switch (__pyx_nargs) {
case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15030, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGetDeviceProperties", 0) < (0)) __PYX_ERR(0, 15030, __pyx_L3_error)
for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGetDeviceProperties", 1, 1, 1, i); __PYX_ERR(0, 15030, __pyx_L3_error) }
}
} else if (unlikely(__pyx_nargs != 1)) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly one positional argument, no keywords. */
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15030, __pyx_L3_error)
}
/* Convert to C int; -1 with a pending exception signals conversion failure. */
__pyx_v_device = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15031, __pyx_L3_error)
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaGetDeviceProperties", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15030, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
/* Error exit: release any argument references collected so far. */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__Pyx_AddTraceback("cuda.bindings.runtime.cudaGetDeviceProperties", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_50cudaGetDeviceProperties(__pyx_self, __pyx_v_device);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of cudaGetDeviceProperties(device) -> (err, prop):
 * allocates a Python-level cudaDeviceProp wrapper, fills it through the
 * cyruntime C call with the GIL released, and returns the 2-tuple
 * (_dict_cudaError_t[err], prop) on success or (_dict_cudaError_t[err], None)
 * on failure. The error/cleanup labels (__pyx_L1_error/__pyx_L0) for this
 * function continue past the end of this edited span. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_50cudaGetDeviceProperties(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_device) {
struct __pyx_obj_4cuda_8bindings_7runtime_cudaDeviceProp *__pyx_v_prop = 0;
cudaError_t __pyx_v_err;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
size_t __pyx_t_4;
cudaError_t __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cudaGetDeviceProperties", 0);
/* "cuda/bindings/runtime.pyx":15349
 *     cdef cudaDeviceProp prop = cudaDeviceProp()
 * (zero-argument vectorcall of the cudaDeviceProp extension type) */
__pyx_t_2 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaDeviceProp);
__pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaDeviceProp);
__pyx_t_4 = 1;
{
PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
__pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15349, __pyx_L1_error)
__Pyx_GOTREF((PyObject *)__pyx_t_1);
}
__pyx_v_prop = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaDeviceProp *)__pyx_t_1);
__pyx_t_1 = 0;
/* "cuda/bindings/runtime.pyx":15350-15351 — `with nogil:` block: the GIL is
 * released around the CUDA runtime call and re-acquired on both the normal
 * and the error path below. */
{
PyThreadState *_save;
_save = NULL;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
/*try:*/ {
__pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetDeviceProperties(((struct cudaDeviceProp *)__pyx_v_prop->_pvt_ptr), __pyx_v_device);
/* NOTE(review): cudaErrorCallRequiresNewerDriver appears to double as the
 * "Python exception pending" sentinel of the cyruntime shim — it is only
 * treated as an error here when PyErr is also set (checked under the GIL);
 * confirm against the cyruntime wrapper. */
if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15351, __pyx_L4_error)
__pyx_v_err = __pyx_t_5;
}
/*finally:*/ {
/*normal exit:*/{
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L5;
}
__pyx_L4_error: {
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "cuda/bindings/runtime.pyx":15352-15353
 *     if err != cyruntime.cudaSuccess:
 *         return (_dict_cudaError_t[err], None) */
__pyx_t_6 = (__pyx_v_err != cudaSuccess);
if (__pyx_t_6) {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15353, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15353, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15353, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15353, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 15353, __pyx_L1_error);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 15353, __pyx_L1_error);
__pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "cuda/bindings/runtime.pyx":15354
 *     return (_dict_cudaError_t[err], prop) */
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15354, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15354, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15354, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15354, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 15354, __pyx_L1_error);
__Pyx_INCREF((PyObject *)__pyx_v_prop);
__Pyx_GIVEREF((PyObject *)__pyx_v_prop);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_prop)) != (0)) __PYX_ERR(0, 15354, __pyx_L1_error);
__pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":15030
 * return (_dict_cudaError_t[err], count)
 *
* @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetDeviceProperties(int device): * """ Returns information about the compute-device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetDeviceProperties", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_prop); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":15356 * return (_dict_cudaError_t[err], prop) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetAttribute(attr not None : cudaDeviceAttr, int device): * """ Returns information about the device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_53cudaDeviceGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_52cudaDeviceGetAttribute, "cudaDeviceGetAttribute(attr: cudaDeviceAttr, int device)\n\nReturns information about the device.\n\nReturns in `*value` the integer value of the attribute `attr` on device\n`device`. 
The supported attributes are:\n\n- :py:obj:`~.cudaDevAttrMaxThreadsPerBlock`: Maximum number of threads\n per block\n\n- :py:obj:`~.cudaDevAttrMaxBlockDimX`: Maximum x-dimension of a block\n\n- :py:obj:`~.cudaDevAttrMaxBlockDimY`: Maximum y-dimension of a block\n\n- :py:obj:`~.cudaDevAttrMaxBlockDimZ`: Maximum z-dimension of a block\n\n- :py:obj:`~.cudaDevAttrMaxGridDimX`: Maximum x-dimension of a grid\n\n- :py:obj:`~.cudaDevAttrMaxGridDimY`: Maximum y-dimension of a grid\n\n- :py:obj:`~.cudaDevAttrMaxGridDimZ`: Maximum z-dimension of a grid\n\n- :py:obj:`~.cudaDevAttrMaxSharedMemoryPerBlock`: Maximum amount of\n shared memory available to a thread block in bytes\n\n- :py:obj:`~.cudaDevAttrTotalConstantMemory`: Memory available on\n device for constant variables in a CUDA C kernel in bytes\n\n- :py:obj:`~.cudaDevAttrWarpSize`: Warp size in threads\n\n- :py:obj:`~.cudaDevAttrMaxPitch`: Maximum pitch in bytes allowed by\n the memory copy functions that involve memory regions allocated\n through :py:obj:`~.cudaMallocPitch()`\n\n- :py:obj:`~.cudaDevAttrMaxTexture1DWidth`: Maximum 1D texture width\n\n- :py:obj:`~.cudaDevAttrMaxTexture1DLinearWidth`: Maximum width for a\n 1D texture bound to linear memory\n\n- :py:obj:`~.cudaDevAttrMaxTexture1DMipmappedWidth`: Maximum mipmapped\n 1D texture width\n\n- :py:obj:`~.cudaDevAttrMaxTexture2DWidth`: Maximum 2D texture width\n\n- :py:obj:`~.cudaDevAttrMaxTexture2DHeight`: Maximum 2D texture height\n\n- :py:obj:`~.cudaDevAttrMaxTexture2DLinearWidth`: Maximum width for a\n 2D texture bound to linear memory\n\n- :py:obj:`~.cudaDevAttrMaxTexture2DLinearHeight`: Maximum height for a\n 2D texture bound to linear memory\n\n- :py:obj:`~.cudaDevAttrMaxTexture2DLinearPitch`: Maximum pitch in\n bytes for a 2D texture bound to linear memory\n\n- :py:o""bj:`~.cudaDevAttrMaxTexture2DMipmappedWidth`: Maximum mipmapped\n 2D texture width\n\n- :py:obj:`~.cudaDevAttrMaxTexture2DMipmappedHeight`: Maximum mipmapped\n 2D texture height\n\n- 
:py:obj:`~.cudaDevAttrMaxTexture3DWidth`: Maximum 3D texture width\n\n- :py:obj:`~.cudaDevAttrMaxTexture3DHeight`: Maximum 3D texture height\n\n- :py:obj:`~.cudaDevAttrMaxTexture3DDepth`: Maximum 3D texture depth\n\n- :py:obj:`~.cudaDevAttrMaxTexture3DWidthAlt`: Alternate maximum 3D\n texture width, 0 if no alternate maximum 3D texture size is supported\n\n- :py:obj:`~.cudaDevAttrMaxTexture3DHeightAlt`: Alternate maximum 3D\n texture height, 0 if no alternate maximum 3D texture size is\n supported\n\n- :py:obj:`~.cudaDevAttrMaxTexture3DDepthAlt`: Alternate maximum 3D\n texture depth, 0 if no alternate maximum 3D texture size is supported\n\n- :py:obj:`~.cudaDevAttrMaxTextureCubemapWidth`: Maximum cubemap\n texture width or height\n\n- :py:obj:`~.cudaDevAttrMaxTexture1DLayeredWidth`: Maximum 1D layered\n texture width\n\n- :py:obj:`~.cudaDevAttrMaxTexture1DLayeredLayers`: Maximum layers in a\n 1D layered texture\n\n- :py:obj:`~.cudaDevAttrMaxTexture2DLayeredWidth`: Maximum 2D layered\n texture width\n\n- :py:obj:`~.cudaDevAttrMaxTexture2DLayeredHeight`: Maximum 2D layered\n texture height\n\n- :py:obj:`~.cudaDevAttrMaxTexture2DLayeredLayers`: Maximum layers in a\n 2D layered texture\n\n- :py:obj:`~.cudaDevAttrMaxTextureCubemapLayeredWidth`: Maximum cubemap\n layered texture width or height\n\n- :py:obj:`~.cudaDevAttrMaxTextureCubemapLayeredLayers`: Maximum layers\n in a cubemap layered texture\n\n- :py:obj:`~.cudaDevAttrMaxSurface1DWidth`: Maximum 1D surface width\n\n- :py:obj:`~.cudaDevAttrMaxSurface2DWidth`: Maximum 2D surface width\n\n- :py:obj:`~.cudaDevAttrMaxSurface2DHeight`: Maximum 2D surface height\n\n- :py:obj:`~.cudaDevAttrMaxSurface3DWidth`: Maximum 3D surface width\n\n- :py:obj:`~.cudaDevAttrMaxSurface3DHeight`: Maximum 3D surface height\n\n- :py:obj:`~.cudaDev""AttrMaxSurface3DDepth`: Maximum 3D surface depth\n\n- :py:obj:`~.cudaDevAttrMaxSurface1DLayeredWidth`: Maximum 1D layered\n surface width\n\n- :py:obj:`~.cudaDevAttrMaxSurface1DLayeredLayers`: 
Maximum layers in a\n 1D layered surface\n\n- :py:obj:`~.cudaDevAttrMaxSurface2DLayeredWidth`: Maximum 2D layered\n surface width\n\n- :py:obj:`~.cudaDevAttrMaxSurface2DLayeredHeight`: Maximum 2D layered\n surface height\n\n- :py:obj:`~.cudaDevAttrMaxSurface2DLayeredLayers`: Maximum layers in a\n 2D layered surface\n\n- :py:obj:`~.cudaDevAttrMaxSurfaceCubemapWidth`: Maximum cubemap\n surface width\n\n- :py:obj:`~.cudaDevAttrMaxSurfaceCubemapLayeredWidth`: Maximum cubemap\n layered surface width\n\n- :py:obj:`~.cudaDevAttrMaxSurfaceCubemapLayeredLayers`: Maximum layers\n in a cubemap layered surface\n\n- :py:obj:`~.cudaDevAttrMaxRegistersPerBlock`: Maximum number of 32-bit\n registers available to a thread block\n\n- :py:obj:`~.cudaDevAttrClockRate`: Peak clock frequency in kilohertz\n\n- :py:obj:`~.cudaDevAttrTextureAlignment`: Alignment requirement;\n texture base addresses aligned to :py:obj:`~.textureAlign` bytes do\n not need an offset applied to texture fetches\n\n- :py:obj:`~.cudaDevAttrTexturePitchAlignment`: Pitch alignment\n requirement for 2D texture references bound to pitched memory\n\n- :py:obj:`~.cudaDevAttrGpuOverlap`: 1 if the device can concurrently\n copy memory between host and device while executing a kernel, or 0 if\n not\n\n- :py:obj:`~.cudaDevAttrMultiProcessorCount`: Number of multiprocessors\n on the device\n\n- :py:obj:`~.cudaDevAttrKernelExecTimeout`: 1 if there is a run time\n limit for kernels executed on the device, or 0 if not\n\n- :py:obj:`~.cudaDevAttrIntegrated`: 1 if the device is integrated with\n the memory subsystem, or 0 if not\n\n- :py:obj:`~.cudaDevAttrCanMapHostMemory`: 1 if the device can map host\n memory into the CUDA address space, or 0 if not\n\n- :py:obj:`~.cudaDevAttrComputeMode`: Compute mode is the compute mo""de\n that the device is currently in. 
Available modes are as follows:\n\n - :py:obj:`~.cudaComputeModeDefault`: Default mode - Device is not\n restricted and multiple threads can use :py:obj:`~.cudaSetDevice()`\n with this device.\n\n - :py:obj:`~.cudaComputeModeProhibited`: Compute-prohibited mode - No\n threads can use :py:obj:`~.cudaSetDevice()` with this device.\n\n - :py:obj:`~.cudaComputeModeExclusiveProcess`: Compute-exclusive-\n process mode - Many threads in one process will be able to use\n :py:obj:`~.cudaSetDevice()` with this device.\n\n- :py:obj:`~.cudaDevAttrConcurrentKernels`: 1 if the device supports\n executing multiple kernels within the same context simultaneously, or\n 0 if not. It is not guaranteed that multiple kernels will be resident\n on the device concurrently so this feature should not be relied upon\n for correctness.\n\n- :py:obj:`~.cudaDevAttrEccEnabled`: 1 if error correction is enabled\n on the device, 0 if error correction is disabled or not supported by\n the device\n\n- :py:obj:`~.cudaDevAttrPciBusId`: PCI bus identifier of the device\n\n- :py:obj:`~.cudaDevAttrPciDeviceId`: PCI device (also known as slot)\n identifier of the device\n\n- :py:obj:`~.cudaDevAttrTccDriver`: 1 if the device is using a TCC\n driver. TCC is only available on Tesla hardware running Windows Vista\n or later.\n\n- :py:obj:`~.cudaDevAttrMemoryClockRate`: Peak memory clock frequency\n in kilohertz\n\n- :py:obj:`~.cudaDevAttrGlobalMemoryBusWidth`: Global memory bus width\n in bits\n\n- :py:obj:`~.cudaDevAttrL2CacheSize`: Size of L2 cache in bytes. 
0 if\n the device doesn't have L2 cache.\n\n- :py:obj:`~.cudaDevAttrMaxThreadsPerMultiProcessor`: Maximum resident\n threads per multiprocessor\n\n- :py:obj:`~.cudaDevAttrUnifiedAddressing`: 1 if the device shares a\n unified address space with the host, or 0 if not\n\n- :py:obj:`~.cudaDevAttrComputeCapabilityMajor`: Major compute\n capability version number\n\n- :py:obj:`~.cudaDevAt""trComputeCapabilityMinor`: Minor compute\n capability version number\n\n- :py:obj:`~.cudaDevAttrStreamPrioritiesSupported`: 1 if the device\n supports stream priorities, or 0 if not\n\n- :py:obj:`~.cudaDevAttrGlobalL1CacheSupported`: 1 if device supports\n caching globals in L1 cache, 0 if not\n\n- :py:obj:`~.cudaDevAttrLocalL1CacheSupported`: 1 if device supports\n caching locals in L1 cache, 0 if not\n\n- :py:obj:`~.cudaDevAttrMaxSharedMemoryPerMultiprocessor`: Maximum\n amount of shared memory available to a multiprocessor in bytes; this\n amount is shared by all thread blocks simultaneously resident on a\n multiprocessor\n\n- :py:obj:`~.cudaDevAttrMaxRegistersPerMultiprocessor`: Maximum number\n of 32-bit registers available to a multiprocessor; this number is\n shared by all thread blocks simultaneously resident on a\n multiprocessor\n\n- :py:obj:`~.cudaDevAttrManagedMemory`: 1 if device supports allocating\n managed memory, 0 if not\n\n- :py:obj:`~.cudaDevAttrIsMultiGpuBoard`: 1 if device is on a multi-GPU\n board, 0 if not\n\n- :py:obj:`~.cudaDevAttrMultiGpuBoardGroupID`: Unique identifier for a\n group of devices on the same multi-GPU board\n\n- :py:obj:`~.cudaDevAttrHostNativeAtomicSupported`: 1 if the link\n between the device and the host supports native atomic operations\n\n- :py:obj:`~.cudaDevAttrSingleToDoublePrecisionPerfRatio`: Ratio of\n single precision performance (in floating-point operations per\n second) to double precision performance\n\n- :py:obj:`~.cudaDevAttrPageableMemoryAccess`: 1 if the device supports\n coherently accessing pageable memory without calling 
cudaHostRegister\n on it, and 0 otherwise\n\n- :py:obj:`~.cudaDevAttrConcurrentManagedAccess`: 1 if the device can\n coherently access managed memory concurrently with the CPU, and 0\n otherwise\n\n- :py:obj:`~.cudaDevAttrComputePreemptionSupported`: 1 if the device\n supports Compute Preemption, 0 if not\n\n- :py:obj:`~.cudaDevAttrCanUseHostPointerForRegisteredMem""`: 1 if the\n device can access host registered memory at the same virtual address\n as the CPU, and 0 otherwise\n\n- :py:obj:`~.cudaDevAttrCooperativeLaunch`: 1 if the device supports\n launching cooperative kernels via\n :py:obj:`~.cudaLaunchCooperativeKernel`, and 0 otherwise\n\n- :py:obj:`~.cudaDevAttrCooperativeMultiDeviceLaunch`: 1 if the device\n supports launching cooperative kernels via\n :py:obj:`~.cudaLaunchCooperativeKernelMultiDevice`, and 0 otherwise\n\n- :py:obj:`~.cudaDevAttrCanFlushRemoteWrites`: 1 if the device supports\n flushing of outstanding remote writes, and 0 otherwise\n\n- :py:obj:`~.cudaDevAttrHostRegisterSupported`: 1 if the device\n supports host memory registration via :py:obj:`~.cudaHostRegister`,\n and 0 otherwise\n\n- :py:obj:`~.cudaDevAttrPageableMemoryAccessUsesHostPageTables`: 1 if\n the device accesses pageable memory via the host's page tables, and 0\n otherwise\n\n- :py:obj:`~.cudaDevAttrDirectManagedMemAccessFromHost`: 1 if the host\n can directly access managed memory on the device without migration,\n and 0 otherwise\n\n- :py:obj:`~.cudaDevAttrMaxSharedMemoryPerBlockOptin`: Maximum per\n block shared memory size on the device. 
This value can be opted into\n when using :py:obj:`~.cudaFuncSetAttribute`\n\n- :py:obj:`~.cudaDevAttrMaxBlocksPerMultiprocessor`: Maximum number of\n thread blocks that can reside on a multiprocessor\n\n- :py:obj:`~.cudaDevAttrMaxPersistingL2CacheSize`: Maximum L2\n persisting lines capacity setting in bytes\n\n- :py:obj:`~.cudaDevAttrMaxAccessPolicyWindowSize`: Maximum value of\n :py:obj:`~.cudaAccessPolicyWindow.num_bytes`\n\n- :py:obj:`~.cudaDevAttrReservedSharedMemoryPerBlock`: Shared memory\n reserved by CUDA driver per block in bytes\n\n- :py:obj:`~.cudaDevAttrSparseCudaArraySupported`: 1 if the device\n supports sparse CUDA arrays and sparse CUDA mipmapped arrays.\n\n- :py:obj:`~.cudaDevAttrHostRegisterReadOnlySupported`: Device supports\n using the :py:obj:`~.cudaHostRegis""ter` flag cudaHostRegisterReadOnly\n to register memory that must be mapped as read-only to the GPU\n\n- :py:obj:`~.cudaDevAttrMemoryPoolsSupported`: 1 if the device supports\n using the cudaMallocAsync and cudaMemPool family of APIs, and 0\n otherwise\n\n- :py:obj:`~.cudaDevAttrGPUDirectRDMASupported`: 1 if the device\n supports GPUDirect RDMA APIs, and 0 otherwise\n\n- :py:obj:`~.cudaDevAttrGPUDirectRDMAFlushWritesOptions`: bitmask to be\n interpreted according to the\n :py:obj:`~.cudaFlushGPUDirectRDMAWritesOptions` enum\n\n- :py:obj:`~.cudaDevAttrGPUDirectRDMAWritesOrdering`: see the\n :py:obj:`~.cudaGPUDirectRDMAWritesOrdering` enum for numerical values\n\n- :py:obj:`~.cudaDevAttrMemoryPoolSupportedHandleTypes`: Bitmask of\n handle types supported with mempool based IPC\n\n- :py:obj:`~.cudaDevAttrDeferredMappingCudaArraySupported` : 1 if the\n device supports deferred mapping CUDA arrays and CUDA mipmapped\n arrays.\n\n- :py:obj:`~.cudaDevAttrIpcEventSupport`: 1 if the device supports IPC\n Events.\n\n- :py:obj:`~.cudaDevAttrNumaConfig`: NUMA configuration of a device:\n value is of type :py:obj:`~.cudaDeviceNumaConfig` enum\n\n- :py:obj:`~.cudaDevAttrNumaId`: NUMA node ID of the 
GPU memory\n\n- :py:obj:`~.cudaDevAttrGpuPciDeviceId`: The combined 16-bit PCI device\n ID and 16-bit PCI vendor ID.\n\n- :py:obj:`~.cudaDevAttrGpuPciSubsystemId`: The combined 16-bit PCI\n subsystem ID and 16-bit PCI vendor subsystem ID.\n\nParameters\n----------\nattr : :py:obj:`~.cudaDeviceAttr`\n Device attribute to query\ndevice : int\n Device number to query\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidValue`\nvalue : int\n Returned device attribute value\n\nSee Also\n--------\n:py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuDeviceGet""Attribute`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_53cudaDeviceGetAttribute = {"cudaDeviceGetAttribute", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_53cudaDeviceGetAttribute, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_52cudaDeviceGetAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_53cudaDeviceGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_attr = 0; int __pyx_v_device; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetAttribute (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* NOTE(review): Cython-generated argument unpacking for
 * cudaDeviceGetAttribute(attr, device); the wrapper's signature and local
 * declarations precede this edited span. Fix the .pyx and regenerate rather
 * than hand-editing. */
{
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_attr,&__pyx_mstate_global->__pyx_n_u_device_2,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15356, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
/* Keyword path: collect up to 2 positionals, then merge keyword arguments. */
switch (__pyx_nargs) {
case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15356, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15356, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceGetAttribute", 0) < (0)) __PYX_ERR(0, 15356, __pyx_L3_error)
for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceGetAttribute", 1, 2, 2, i); __PYX_ERR(0, 15356, __pyx_L3_error) }
}
} else if (unlikely(__pyx_nargs != 2)) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly two positional arguments, no keywords. */
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15356, __pyx_L3_error)
values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15356, __pyx_L3_error)
}
__pyx_v_attr = values[0];
__pyx_v_device = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15357, __pyx_L3_error)
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaDeviceGetAttribute", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 15356, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
/* Error exit: release any argument references collected so far. */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Enforces the `attr not None` contract from the .pyx signature. */
if (unlikely(((PyObject *)__pyx_v_attr) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attr"); __PYX_ERR(0, 15357, __pyx_L1_error)
}
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_52cudaDeviceGetAttribute(__pyx_self, __pyx_v_attr, __pyx_v_device);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
goto __pyx_L7_cleaned_up;
__pyx_L0:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__pyx_L7_cleaned_up:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of cudaDeviceGetAttribute(attr, device) -> (err, value):
 * reads attr.value and converts it to the C cudaDeviceAttr enum, queries the
 * attribute through the cyruntime call with the GIL released, and returns
 * (_dict_cudaError_t[err], value) on success or (_dict_cudaError_t[err], None)
 * on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_52cudaDeviceGetAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_attr, int __pyx_v_device) {
int __pyx_v_value;
enum cudaDeviceAttr __pyx_v_cyattr;
cudaError_t __pyx_v_err;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
enum cudaDeviceAttr __pyx_t_2;
cudaError_t __pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cudaDeviceGetAttribute", 0);
/* "cuda/bindings/runtime.pyx":15724
 *     cdef int value = 0 */
__pyx_v_value = 0;
/* "cuda/bindings/runtime.pyx":15725
 *     cdef cyruntime.cudaDeviceAttr cyattr = attr.value */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_attr, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15725, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = ((enum cudaDeviceAttr)__Pyx_PyLong_As_enum__cudaDeviceAttr(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 15725, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_cyattr = __pyx_t_2;
/* "cuda/bindings/runtime.pyx":15726-15727 — `with nogil:` block: the GIL is
 * released around the CUDA runtime call and re-acquired on both exit paths. */
{
PyThreadState *_save;
_save = NULL;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
/*try:*/ {
__pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetAttribute((&__pyx_v_value), __pyx_v_cyattr, __pyx_v_device);
/* NOTE(review): cudaErrorCallRequiresNewerDriver appears to double as the
 * "Python exception pending" sentinel of the cyruntime shim; it is only an
 * error here when PyErr is also set (checked under the GIL). */
if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15727, __pyx_L4_error)
__pyx_v_err = __pyx_t_3;
}
/*finally:*/ {
/*normal exit:*/{
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L5;
}
__pyx_L4_error: {
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "cuda/bindings/runtime.pyx":15728-15729
 *     if err != cyruntime.cudaSuccess:
 *         return (_dict_cudaError_t[err], None) */
__pyx_t_4 = (__pyx_v_err != cudaSuccess);
if (__pyx_t_4) {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15729, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15729, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15729, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15729, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 15729, __pyx_L1_error);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 15729, __pyx_L1_error);
__pyx_t_6 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
}
/* "cuda/bindings/runtime.pyx":15730
 *     return (_dict_cudaError_t[err], value) */
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15730, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15730, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15730, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyLong_From_int(__pyx_v_value); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15730, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15730, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 15730, __pyx_L1_error);
__Pyx_GIVEREF(__pyx_t_6);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 15730, __pyx_L1_error);
__pyx_t_1 = 0;
__pyx_t_6 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":15732
 * return (_dict_cudaError_t[err], value)
 *
 * @cython.embedsignature(True)
 * def cudaDeviceGetDefaultMemPool(int device):
 *     """ Returns the default mempool of a device.
*/ /* Python wrapper */ /* NOTE(review): Cython-generated binding for cudaDeviceGetDefaultMemPool (cuda/bindings/runtime.pyx:15732). Hand edits will be lost on regeneration; change the .pyx source instead. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_55cudaDeviceGetDefaultMemPool(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_54cudaDeviceGetDefaultMemPool, "cudaDeviceGetDefaultMemPool(int device)\n\nReturns the default mempool of a device.\n\nThe default mempool of a device contains device memory from that\ndevice.\n\nParameters\n----------\ndevice : int\n None\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidValue` :py:obj:`~.cudaErrorNotSupported`\nmemPool : :py:obj:`~.cudaMemPool_t`\n None\n\nSee Also\n--------\n:py:obj:`~.cuDeviceGetDefaultMemPool`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaMemPoolTrimTo`, :py:obj:`~.cudaMemPoolGetAttribute`, :py:obj:`~.cudaDeviceSetMemPool`, :py:obj:`~.cudaMemPoolSetAttribute`, :py:obj:`~.cudaMemPoolSetAccess`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_55cudaDeviceGetDefaultMemPool = {"cudaDeviceGetDefaultMemPool", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_55cudaDeviceGetDefaultMemPool, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_54cudaDeviceGetDefaultMemPool}; /* Wrapper: unpacks exactly one positional-or-keyword argument ("device"), converts it to C int, then calls the implementation below. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_55cudaDeviceGetDefaultMemPool(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_device; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cudaDeviceGetDefaultMemPool (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15732, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15732, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceGetDefaultMemPool", 0) < (0)) __PYX_ERR(0, 15732, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceGetDefaultMemPool", 1, 1, 1, i); __PYX_ERR(0, 15732, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15732, __pyx_L3_error) } __pyx_v_device = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15733, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceGetDefaultMemPool", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15732, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetDefaultMemPool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_54cudaDeviceGetDefaultMemPool(__pyx_self, __pyx_v_device); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: constructs an empty cudaMemPool_t Python wrapper, calls cyruntime.cudaDeviceGetDefaultMemPool with the GIL released, and returns the 2-tuple (cudaError_t, memPool); memPool is None when err != cudaSuccess. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_54cudaDeviceGetDefaultMemPool(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_device) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPool_t *__pyx_v_memPool = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetDefaultMemPool", 0); /* "cuda/bindings/runtime.pyx":15755 * :py:obj:`~.cuDeviceGetDefaultMemPool`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaMemPoolTrimTo`, :py:obj:`~.cudaMemPoolGetAttribute`, :py:obj:`~.cudaDeviceSetMemPool`, :py:obj:`~.cudaMemPoolSetAttribute`, :py:obj:`~.cudaMemPoolSetAccess` * """ * cdef cudaMemPool_t memPool = cudaMemPool_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceGetDefaultMemPool(memPool._pvt_ptr, device) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15755, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_memPool = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPool_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":15756 * """ * cdef cudaMemPool_t memPool = cudaMemPool_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetDefaultMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":15757 * cdef cudaMemPool_t memPool = cudaMemPool_t() * with nogil: * err = cyruntime.cudaDeviceGetDefaultMemPool(memPool._pvt_ptr, device) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetDefaultMemPool(((cudaMemPool_t *)__pyx_v_memPool->__pyx_base._pvt_ptr), __pyx_v_device); if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15757, __pyx_L4_error) __pyx_v_err = __pyx_t_5; } /* "cuda/bindings/runtime.pyx":15756 * """ * cdef cudaMemPool_t memPool = cudaMemPool_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetDefaultMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":15758 * with nogil: * err = cyruntime.cudaDeviceGetDefaultMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":15759 * err =
cyruntime.cudaDeviceGetDefaultMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], memPool) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15759, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 15759, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 15759, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15758 * with nogil: * err = cyruntime.cudaDeviceGetDefaultMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) */ } /* "cuda/bindings/runtime.pyx":15760 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15760,
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 15760, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_memPool); __Pyx_GIVEREF((PyObject *)__pyx_v_memPool); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_memPool)) != (0)) __PYX_ERR(0, 15760, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15732 * return (_dict_cudaError_t[err], value) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetDefaultMemPool(int device): * """ Returns the default mempool of a device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetDefaultMemPool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memPool); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":15762 * return (_dict_cudaError_t[err], memPool) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSetMemPool(int device, memPool): * """ Sets the current memory pool of a device.
*/ /* Python wrapper */ /* NOTE(review): Cython-generated binding for cudaDeviceSetMemPool (cuda/bindings/runtime.pyx:15762). Hand edits will be lost on regeneration; change the .pyx source instead. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_57cudaDeviceSetMemPool(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_56cudaDeviceSetMemPool, "cudaDeviceSetMemPool(int device, memPool)\n\nSets the current memory pool of a device.\n\nThe memory pool must be local to the specified device. Unless a mempool\nis specified in the :py:obj:`~.cudaMallocAsync` call,\n:py:obj:`~.cudaMallocAsync` allocates from the current mempool of the\nprovided stream's device. By default, a device's current memory pool is\nits default memory pool.\n\nParameters\n----------\ndevice : int\n None\nmemPool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n None\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue` :py:obj:`~.cudaErrorInvalidDevice` :py:obj:`~.cudaErrorNotSupported`\n\nSee Also\n--------\n:py:obj:`~.cuDeviceSetMemPool`, :py:obj:`~.cudaDeviceGetMemPool`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaMemPoolCreate`, :py:obj:`~.cudaMemPoolDestroy`, :py:obj:`~.cudaMallocFromPoolAsync`\n\nNotes\n-----\nUse :py:obj:`~.cudaMallocFromPoolAsync` to specify asynchronous allocations from a device different than the one the stream runs on."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_57cudaDeviceSetMemPool = {"cudaDeviceSetMemPool", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_57cudaDeviceSetMemPool, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_56cudaDeviceSetMemPool}; /* Wrapper: unpacks the two positional-or-keyword arguments ("device" -> C int, "memPool" kept as a Python object), then calls the implementation below. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_57cudaDeviceSetMemPool(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int
__pyx_v_device; PyObject *__pyx_v_memPool = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceSetMemPool (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_memPool,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15762, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15762, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15762, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceSetMemPool", 0) < (0)) __PYX_ERR(0, 15762, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceSetMemPool", 1, 2, 2, i); __PYX_ERR(0, 15762, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0,
15762, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15762, __pyx_L3_error) } __pyx_v_device = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15763, __pyx_L3_error) __pyx_v_memPool = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceSetMemPool", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 15762, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceSetMemPool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_56cudaDeviceSetMemPool(__pyx_self, __pyx_v_device, __pyx_v_memPool); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: coerces memPool (None, cudaMemPool_t, or driver.CUmemoryPool) to an integer handle, reinterprets it as a C cudaMemPool_t, calls the runtime with the GIL released, and returns the 1-tuple (cudaError_t,). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_56cudaDeviceSetMemPool(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_device, PyObject *__pyx_v_memPool) { cudaMemPool_t __pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceSetMemPool", 0); /* "cuda/bindings/runtime.pyx":15793 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: #
<<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_1 = (__pyx_v_memPool == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":15794 * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":15793 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":15795 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":15796 * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = int(cudaMemPool_t(memPool)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pmemPool = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":15795 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":15798 * pmemPool = int(memPool) * else: * pmemPool =
int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_memPool}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15798, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":15799 * else: * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceSetMemPool(device, cymemPool) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 15799, __pyx_L1_error) __pyx_v_cymemPool = ((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":15800 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSetMemPool(device, cymemPool) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":15801 * cymemPool = pmemPool * with nogil: * err = cyruntime.cudaDeviceSetMemPool(device, cymemPool) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 =
__pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceSetMemPool(__pyx_v_device, __pyx_v_cymemPool); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15801, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":15800 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceSetMemPool(device, cymemPool) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":15802 * with nogil: * err = cyruntime.cudaDeviceSetMemPool(device, cymemPool) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15802, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15802, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15802, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15802, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 15802, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15762 * return (_dict_cudaError_t[err], memPool) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceSetMemPool(int device, memPool): * """ Sets the current memory pool of a device.
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceSetMemPool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":15804 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetMemPool(int device): * """ Gets the current mempool for a device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_59cudaDeviceGetMemPool(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_58cudaDeviceGetMemPool, "cudaDeviceGetMemPool(int device)\n\nGets the current mempool for a device.\n\nReturns the last pool provided to :py:obj:`~.cudaDeviceSetMemPool` for\nthis device or the device's default memory pool if\n:py:obj:`~.cudaDeviceSetMemPool` has never been called. 
By default the\ncurrent mempool is the default mempool for a device, otherwise the\nreturned pool must have been set with :py:obj:`~.cuDeviceSetMemPool` or\n:py:obj:`~.cudaDeviceSetMemPool`.\n\nParameters\n----------\ndevice : int\n None\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue` :py:obj:`~.cudaErrorNotSupported`\nmemPool : :py:obj:`~.cudaMemPool_t`\n None\n\nSee Also\n--------\n:py:obj:`~.cuDeviceGetMemPool`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceSetMemPool`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_59cudaDeviceGetMemPool = {"cudaDeviceGetMemPool", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_59cudaDeviceGetMemPool, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_58cudaDeviceGetMemPool}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_59cudaDeviceGetMemPool(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_device; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetMemPool (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15804, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15804, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceGetMemPool", 0) < (0)) __PYX_ERR(0, 15804, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceGetMemPool", 1, 1, 1, i); __PYX_ERR(0, 15804, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15804, __pyx_L3_error) } __pyx_v_device = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15805, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceGetMemPool", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15804, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetMemPool", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_58cudaDeviceGetMemPool(__pyx_self, __pyx_v_device); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
/* NOTE(review): Cython-generated C (from "cuda/bindings/runtime.pyx"); do not hand-edit — fix the .pyx source and regenerate. */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Impl of Python-level cudaDeviceGetMemPool(device): constructs a cudaMemPool_t wrapper object, releases the GIL around the cyruntime call, then returns (err, None) on failure or (err, memPool) on success. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_58cudaDeviceGetMemPool(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_device) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPool_t *__pyx_v_memPool = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetMemPool", 0); /* "cuda/bindings/runtime.pyx":15831 * :py:obj:`~.cuDeviceGetMemPool`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceSetMemPool` * """ * cdef cudaMemPool_t memPool = cudaMemPool_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceGetMemPool(memPool._pvt_ptr, device) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15831, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_memPool = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPool_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":15832 * """ * cdef cudaMemPool_t memPool = cudaMemPool_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":15833 * cdef cudaMemPool_t 
memPool = cudaMemPool_t() * with nogil: * err = cyruntime.cudaDeviceGetMemPool(memPool._pvt_ptr, device) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetMemPool(((cudaMemPool_t *)__pyx_v_memPool->__pyx_base._pvt_ptr), __pyx_v_device); if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15833, __pyx_L4_error) __pyx_v_err = __pyx_t_5; } /* "cuda/bindings/runtime.pyx":15832 * """ * cdef cudaMemPool_t memPool = cudaMemPool_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":15834 * with nogil: * err = cyruntime.cudaDeviceGetMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":15835 * err = cyruntime.cudaDeviceGetMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], memPool) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 15835, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 15835, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15834 * with nogil: * err = cyruntime.cudaDeviceGetMemPool(memPool._pvt_ptr, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) */ } /* "cuda/bindings/runtime.pyx":15836 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 15836, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_memPool); __Pyx_GIVEREF((PyObject *)__pyx_v_memPool); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_memPool)) != (0)) __PYX_ERR(0, 15836, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r 
= __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15804 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetMemPool(int device): * """ Gets the current mempool for a device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetMemPool", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memPool); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":15838 * return (_dict_cudaError_t[err], memPool) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, int device, int flags): * """ Return NvSciSync attributes that this device can support. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_61cudaDeviceGetNvSciSyncAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_60cudaDeviceGetNvSciSyncAttributes, "cudaDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, int device, int flags)\n\nReturn NvSciSync attributes that this device can support.\n\nReturns in `nvSciSyncAttrList`, the properties of NvSciSync that this\nCUDA device, `dev` can support. 
The returned `nvSciSyncAttrList` can be\nused to create an NvSciSync that matches this device's capabilities.\n\nIf NvSciSyncAttrKey_RequiredPerm field in `nvSciSyncAttrList` is\nalready set this API will return :py:obj:`~.cudaErrorInvalidValue`.\n\nThe applications should set `nvSciSyncAttrList` to a valid\nNvSciSyncAttrList failing which this API will return\n:py:obj:`~.cudaErrorInvalidHandle`.\n\nThe `flags` controls how applications intends to use the NvSciSync\ncreated from the `nvSciSyncAttrList`. The valid flags are:\n\n- :py:obj:`~.cudaNvSciSyncAttrSignal`, specifies that the applications\n intends to signal an NvSciSync on this CUDA device.\n\n- :py:obj:`~.cudaNvSciSyncAttrWait`, specifies that the applications\n intends to wait on an NvSciSync on this CUDA device.\n\nAt least one of these flags must be set, failing which the API returns\n:py:obj:`~.cudaErrorInvalidValue`. Both the flags are orthogonal to one\nanother: a developer may set both these flags that allows to set both\nwait and signal specific attributes in the same `nvSciSyncAttrList`.\n\nNote that this API updates the input `nvSciSyncAttrList` with values\nequivalent to the following public attribute key-values:\nNvSciSyncAttrKey_RequiredPerm is set to\n\n- NvSciSyncAccessPerm_SignalOnly if :py:obj:`~.cudaNvSciSyncAttrSignal`\n is set in `flags`.\n\n- NvSciSyncAccessPerm_WaitOnly if :py:obj:`~.cudaNvSciSyncAttrWait` is\n set in `flags`.\n\n- NvSciSyncAccessPerm_WaitSignal if both\n :py:obj:`~.cudaNvSciSyncAttrWait` and\n :py:obj:`~.cudaNvSciSyncAttrSignal` are set in `flags`.\n NvSciSyncAttrKey_PrimitiveInfo is set to\n\n- NvSciSyncAttrValPrimitiveType_SysmemSemaphore on any valid `device`.\n\n- NvSciSyncAttrValPrimitiveType_Syncpoint if `device` is ""a Tegra\n device.\n\n- NvSciSyncAttrValPrimitiveType_SysmemSemaphorePayload64b if `device`\n is GA10X+. 
NvSciSyncAttrKey_GpuId is set to the same UUID that is\n returned in `None` from :py:obj:`~.cudaDeviceGetProperties` for this\n `device`.\n\n:py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorDeviceUninitialized`,\n:py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidHandle`,\n:py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorNotSupported`,\n:py:obj:`~.cudaErrorMemoryAllocation`\n\nParameters\n----------\nnvSciSyncAttrList : Any\n Return NvSciSync attributes supported.\ndevice : int\n Valid Cuda Device to get NvSciSync attributes for.\nflags : int\n flags describing NvSciSync usage.\n\nReturns\n-------\ncudaError_t\n\n\nSee Also\n--------\n:py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_61cudaDeviceGetNvSciSyncAttributes = {"cudaDeviceGetNvSciSyncAttributes", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_61cudaDeviceGetNvSciSyncAttributes, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_60cudaDeviceGetNvSciSyncAttributes}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_61cudaDeviceGetNvSciSyncAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_nvSciSyncAttrList = 0; int __pyx_v_device; int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetNvSciSyncAttributes (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = 
/* NOTE(review): Cython-generated FASTCALL argument unpacking for the cudaDeviceGetNvSciSyncAttributes wrapper (3 required args: nvSciSyncAttrList, device, flags); edits belong in runtime.pyx, not here. */ PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_nvSciSyncAttrList,&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15838, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15838, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15838, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15838, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceGetNvSciSyncAttributes", 0) < (0)) __PYX_ERR(0, 15838, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceGetNvSciSyncAttributes", 1, 3, 3, i); __PYX_ERR(0, 15838, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15838, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15838, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if 
(!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15838, __pyx_L3_error) } __pyx_v_nvSciSyncAttrList = values[0]; __pyx_v_device = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15839, __pyx_L3_error) __pyx_v_flags = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15839, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceGetNvSciSyncAttributes", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 15838, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetNvSciSyncAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_60cudaDeviceGetNvSciSyncAttributes(__pyx_self, __pyx_v_nvSciSyncAttrList, __pyx_v_device, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Impl: wraps nvSciSyncAttrList in _HelperInputVoidPtr, reads its .cptr attribute and converts it to void*, calls the cyruntime function with the GIL released, and returns a 1-tuple (err,). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_60cudaDeviceGetNvSciSyncAttributes(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_nvSciSyncAttrList, int __pyx_v_device, int __pyx_v_flags) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cynvSciSyncAttrList = NULL; void *__pyx_v_cynvSciSyncAttrList_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_lineno = 0; const 
char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetNvSciSyncAttributes", 0); /* "cuda/bindings/runtime.pyx":15915 * :py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync` * """ * cynvSciSyncAttrList = _HelperInputVoidPtr(nvSciSyncAttrList) # <<<<<<<<<<<<<< * cdef void* cynvSciSyncAttrList_ptr = cynvSciSyncAttrList.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_nvSciSyncAttrList}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15915, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cynvSciSyncAttrList = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":15916 * """ * cynvSciSyncAttrList = _HelperInputVoidPtr(nvSciSyncAttrList) * cdef void* cynvSciSyncAttrList_ptr = cynvSciSyncAttrList.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceGetNvSciSyncAttributes(cynvSciSyncAttrList_ptr, device, flags) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cynvSciSyncAttrList), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 15916, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
__pyx_v_cynvSciSyncAttrList_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":15917 * cynvSciSyncAttrList = _HelperInputVoidPtr(nvSciSyncAttrList) * cdef void* cynvSciSyncAttrList_ptr = cynvSciSyncAttrList.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetNvSciSyncAttributes(cynvSciSyncAttrList_ptr, device, flags) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":15918 * cdef void* cynvSciSyncAttrList_ptr = cynvSciSyncAttrList.cptr * with nogil: * err = cyruntime.cudaDeviceGetNvSciSyncAttributes(cynvSciSyncAttrList_ptr, device, flags) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetNvSciSyncAttributes(__pyx_v_cynvSciSyncAttrList_ptr, __pyx_v_device, __pyx_v_flags); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15918, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":15917 * cynvSciSyncAttrList = _HelperInputVoidPtr(nvSciSyncAttrList) * cdef void* cynvSciSyncAttrList_ptr = cynvSciSyncAttrList.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetNvSciSyncAttributes(cynvSciSyncAttrList_ptr, device, flags) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":15919 * with nogil: * err = cyruntime.cudaDeviceGetNvSciSyncAttributes(cynvSciSyncAttrList_ptr, device, flags) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15919, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 15919, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15838 * return (_dict_cudaError_t[err], memPool) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetNvSciSyncAttributes(nvSciSyncAttrList, int device, int flags): * """ Return NvSciSync attributes that this device can support. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetNvSciSyncAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cynvSciSyncAttrList); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":15921 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetP2PAttribute(attr not None : cudaDeviceP2PAttr, int srcDevice, int dstDevice): * """ Queries attributes of the link between two devices. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_63cudaDeviceGetP2PAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_62cudaDeviceGetP2PAttribute, "cudaDeviceGetP2PAttribute(attr: cudaDeviceP2PAttr, int srcDevice, int dstDevice)\n\nQueries attributes of the link between two devices.\n\nReturns in `*value` the value of the requested attribute `attrib` of\nthe link between `srcDevice` and `dstDevice`. The supported attributes\nare:\n\n- :py:obj:`~.cudaDevP2PAttrPerformanceRank`: A relative value\n indicating the performance of the link between two devices. Lower\n value means better performance (0 being the value used for most\n performant link).\n\n- :py:obj:`~.cudaDevP2PAttrAccessSupported`: 1 if peer access is\n enabled.\n\n- :py:obj:`~.cudaDevP2PAttrNativeAtomicSupported`: 1 if native atomic\n operations over the link are supported.\n\n- :py:obj:`~.cudaDevP2PAttrCudaArrayAccessSupported`: 1 if accessing\n CUDA arrays over the link is supported.\n\nReturns :py:obj:`~.cudaErrorInvalidDevice` if `srcDevice` or\n`dstDevice` are not valid or if they represent the same device.\n\nReturns :py:obj:`~.cudaErrorInvalidValue` if `attrib` is not valid or\nif `value` is a null pointer.\n\nParameters\n----------\nattrib : :py:obj:`~.cudaDeviceP2PAttr`\n The requested attribute of the link between `srcDevice` and\n `dstDevice`.\nsrcDevice : int\n The source device of the target link.\ndstDevice : int\n The destination device of the target link.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidValue`\nvalue : int\n Returned value of the requested attribute\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cudaDeviceDisablePeerAccess`, 
:py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cuDeviceGetP2PAttribute`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_63cudaDeviceGetP2PAttribute = {"cudaDeviceGetP2PAttribute", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_63cudaDeviceGetP2PAttribute, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_62cudaDeviceGetP2PAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_63cudaDeviceGetP2PAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_attr = 0; int __pyx_v_srcDevice; int __pyx_v_dstDevice; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceGetP2PAttribute (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_attr,&__pyx_mstate_global->__pyx_n_u_srcDevice_2,&__pyx_mstate_global->__pyx_n_u_dstDevice_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
/* NOTE(review): Cython-generated keyword/positional parsing for the cudaDeviceGetP2PAttribute wrapper (attr, srcDevice, dstDevice); the wrapper also rejects attr=None after unpacking. Edit runtime.pyx, not this file. */ __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15921, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15921, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15921, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15921, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceGetP2PAttribute", 0) < (0)) __PYX_ERR(0, 15921, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceGetP2PAttribute", 1, 3, 3, i); __PYX_ERR(0, 15921, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15921, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 15921, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 15921, __pyx_L3_error) } __pyx_v_attr = values[0]; __pyx_v_srcDevice = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_srcDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15922, __pyx_L3_error) __pyx_v_dstDevice = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_dstDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 15922, __pyx_L3_error) } goto 
__pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceGetP2PAttribute", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 15921, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetP2PAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_attr) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attr"); __PYX_ERR(0, 15922, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_62cudaDeviceGetP2PAttribute(__pyx_self, __pyx_v_attr, __pyx_v_srcDevice, __pyx_v_dstDevice); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Impl: converts attr.value to the cudaDeviceP2PAttr enum, queries the link attribute into a local int with the GIL released, and returns (err, None) on failure or (err, value) on success. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_62cudaDeviceGetP2PAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_attr, int __pyx_v_srcDevice, int __pyx_v_dstDevice) { int __pyx_v_value; enum cudaDeviceP2PAttr __pyx_v_cyattr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaDeviceP2PAttr __pyx_t_2; cudaError_t __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceGetP2PAttribute", 0); /* "cuda/bindings/runtime.pyx":15970 * 
:py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cudaDeviceDisablePeerAccess`, :py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cuDeviceGetP2PAttribute` * """ * cdef int value = 0 # <<<<<<<<<<<<<< * cdef cyruntime.cudaDeviceP2PAttr cyattr = attr.value * with nogil: */ __pyx_v_value = 0; /* "cuda/bindings/runtime.pyx":15971 * """ * cdef int value = 0 * cdef cyruntime.cudaDeviceP2PAttr cyattr = attr.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceGetP2PAttribute(&value, cyattr, srcDevice, dstDevice) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_attr, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15971, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaDeviceP2PAttr)__Pyx_PyLong_As_enum__cudaDeviceP2PAttr(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 15971, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyattr = __pyx_t_2; /* "cuda/bindings/runtime.pyx":15972 * cdef int value = 0 * cdef cyruntime.cudaDeviceP2PAttr cyattr = attr.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetP2PAttribute(&value, cyattr, srcDevice, dstDevice) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":15973 * cdef cyruntime.cudaDeviceP2PAttr cyattr = attr.value * with nogil: * err = cyruntime.cudaDeviceGetP2PAttribute(&value, cyattr, srcDevice, dstDevice) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceGetP2PAttribute((&__pyx_v_value), __pyx_v_cyattr, __pyx_v_srcDevice, __pyx_v_dstDevice); if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15973, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":15972 * cdef int value = 0 * cdef cyruntime.cudaDeviceP2PAttr cyattr = attr.value * 
with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceGetP2PAttribute(&value, cyattr, srcDevice, dstDevice) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":15974 * with nogil: * err = cyruntime.cudaDeviceGetP2PAttribute(&value, cyattr, srcDevice, dstDevice) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], value) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":15975 * err = cyruntime.cudaDeviceGetP2PAttribute(&value, cyattr, srcDevice, dstDevice) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], value) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15975, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15975, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15975, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15975, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 15975, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 15975, __pyx_L1_error); __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15974 * with nogil: * err = 
cyruntime.cudaDeviceGetP2PAttribute(&value, cyattr, srcDevice, dstDevice) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], value) */ } /* "cuda/bindings/runtime.pyx":15976 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], value) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyLong_From_int(__pyx_v_value); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 15976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 15976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 15976, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 15976, __pyx_L1_error); __pyx_t_1 = 0; __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15921 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceGetP2PAttribute(attr not None : cudaDeviceP2PAttr, int srcDevice, int dstDevice): * """ Queries attributes of the link between two devices. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceGetP2PAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":15978 * return (_dict_cudaError_t[err], value) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaChooseDevice(prop : Optional[cudaDeviceProp]): * """ Select compute-device which best matches criteria. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_65cudaChooseDevice(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_64cudaChooseDevice, "cudaChooseDevice(cudaDeviceProp prop: Optional[cudaDeviceProp])\n\nSelect compute-device which best matches criteria.\n\nReturns in `*device` the device which has properties that best match\n`*prop`.\n\nParameters\n----------\nprop : :py:obj:`~.cudaDeviceProp`\n Desired device properties\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\ndevice : int\n Device with best match\n\nSee Also\n--------\n:py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaInitDevice`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_65cudaChooseDevice = {"cudaChooseDevice", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_65cudaChooseDevice, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_64cudaChooseDevice}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_65cudaChooseDevice(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t 
__pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaDeviceProp *__pyx_v_prop = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaChooseDevice (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_prop,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 15978, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15978, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaChooseDevice", 0) < (0)) __PYX_ERR(0, 15978, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaChooseDevice", 1, 1, 1, i); __PYX_ERR(0, 15978, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 15978, __pyx_L3_error) } __pyx_v_prop = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaDeviceProp 
*)values[0]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaChooseDevice", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 15978, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaChooseDevice", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_prop), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaDeviceProp, 1, "prop", 0))) __PYX_ERR(0, 15979, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_64cudaChooseDevice(__pyx_self, __pyx_v_prop); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_64cudaChooseDevice(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaDeviceProp *__pyx_v_prop) { int __pyx_v_device; struct cudaDeviceProp *__pyx_v_cyprop_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations struct cudaDeviceProp *__pyx_t_1; int __pyx_t_2; cudaError_t __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaChooseDevice", 0); /* "cuda/bindings/runtime.pyx":16001 * :py:obj:`~.cudaGetDeviceCount`, 
:py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaInitDevice` * """ * cdef int device = 0 # <<<<<<<<<<<<<< * cdef cyruntime.cudaDeviceProp* cyprop_ptr = prop._pvt_ptr if prop is not None else NULL * with nogil: */ __pyx_v_device = 0; /* "cuda/bindings/runtime.pyx":16002 * """ * cdef int device = 0 * cdef cyruntime.cudaDeviceProp* cyprop_ptr = prop._pvt_ptr if prop is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaChooseDevice(&device, cyprop_ptr) */ __pyx_t_2 = (((PyObject *)__pyx_v_prop) != Py_None); if (__pyx_t_2) { __pyx_t_1 = __pyx_v_prop->_pvt_ptr; } else { __pyx_t_1 = NULL; } __pyx_v_cyprop_ptr = __pyx_t_1; /* "cuda/bindings/runtime.pyx":16003 * cdef int device = 0 * cdef cyruntime.cudaDeviceProp* cyprop_ptr = prop._pvt_ptr if prop is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaChooseDevice(&device, cyprop_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16004 * cdef cyruntime.cudaDeviceProp* cyprop_ptr = prop._pvt_ptr if prop is not None else NULL * with nogil: * err = cyruntime.cudaChooseDevice(&device, cyprop_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaChooseDevice((&__pyx_v_device), __pyx_v_cyprop_ptr); if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16004, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":16003 * cdef int device = 0 * cdef cyruntime.cudaDeviceProp* cyprop_ptr = prop._pvt_ptr if prop is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaChooseDevice(&device, cyprop_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } 
__pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":16005 * with nogil: * err = cyruntime.cudaChooseDevice(&device, cyprop_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":16006 * err = cyruntime.cudaChooseDevice(&device, cyprop_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], device) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16006, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16006, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16006, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16006, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 16006, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 16006, __pyx_L1_error); __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16005 * with nogil: * err = cyruntime.cudaChooseDevice(&device, cyprop_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) */ } /* "cuda/bindings/runtime.pyx":16007 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], 
None) * return (_dict_cudaError_t[err], device) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyLong_From_int(__pyx_v_device); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 16007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16007, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16007, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 16007, __pyx_L1_error); __pyx_t_4 = 0; __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":15978 * return (_dict_cudaError_t[err], value) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaChooseDevice(prop : Optional[cudaDeviceProp]): * """ Select compute-device which best matches criteria. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaChooseDevice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16009 * return (_dict_cudaError_t[err], device) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaInitDevice(int device, unsigned int deviceFlags, unsigned int flags): * """ Initialize device to be used for GPU executions. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_67cudaInitDevice(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_66cudaInitDevice, "cudaInitDevice(int device, unsigned int deviceFlags, unsigned int flags)\n\nInitialize device to be used for GPU executions.\n\nThis function will initialize the CUDA Runtime structures and primary\ncontext on `device` when called, but the context will not be made\ncurrent to `device`.\n\nWhen :py:obj:`~.cudaInitDeviceFlagsAreValid` is set in `flags`,\ndeviceFlags are applied to the requested device. The values of\ndeviceFlags match those of the flags parameters in\n:py:obj:`~.cudaSetDeviceFlags`. 
The effect may be verified by\n:py:obj:`~.cudaGetDeviceFlags`.\n\nThis function will return an error if the device is in\n:py:obj:`~.cudaComputeModeExclusiveProcess` and is occupied by another\nprocess or if the device is in :py:obj:`~.cudaComputeModeProhibited`.\n\nParameters\n----------\ndevice : int\n Device on which the runtime will initialize itself.\ndeviceFlags : unsigned int\n Parameters for device operation.\nflags : unsigned int\n Flags for controlling the device initialization.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`,\n\nSee Also\n--------\n:py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaSetDevice` :py:obj:`~.cuCtxSetCurrent`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_67cudaInitDevice = {"cudaInitDevice", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_67cudaInitDevice, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_66cudaInitDevice}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_67cudaInitDevice(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_device; unsigned int __pyx_v_deviceFlags; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaInitDevice (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_deviceFlags,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16009, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16009, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16009, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16009, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaInitDevice", 0) < (0)) __PYX_ERR(0, 16009, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaInitDevice", 1, 3, 3, i); __PYX_ERR(0, 16009, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16009, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16009, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16009, __pyx_L3_error) } __pyx_v_device = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_device == (int)-1) && 
/* NOTE(review): Cython-generated code (from cuda/bindings/runtime.pyx) -- do not hand-edit; regenerate from the .pyx source instead. */
PyErr_Occurred())) __PYX_ERR(0, 16010, __pyx_L3_error)
__pyx_v_deviceFlags = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_deviceFlags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16010, __pyx_L3_error)
__pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16010, __pyx_L3_error) }
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaInitDevice", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 16009, __pyx_L3_error)
__pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done;
/* Wrapper error path: drop argument references, record the traceback, return NULL. */
__pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaInitDevice", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_66cudaInitDevice(__pyx_self, __pyx_v_device, __pyx_v_deviceFlags, __pyx_v_flags);
/* function exit code: release the borrowed argument refs regardless of outcome */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of cudaInitDevice(device, deviceFlags, flags): releases the GIL,
 * calls the cyruntime cudaInitDevice binding, then returns the 1-tuple
 * (_dict_cudaError_t[err],).  Corresponds to cuda/bindings/runtime.pyx:16045-16047. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_66cudaInitDevice(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_device, unsigned int __pyx_v_deviceFlags, unsigned int __pyx_v_flags) {
cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaInitDevice", 0);
/* "cuda/bindings/runtime.pyx":16045  `with nogil:` -- release the GIL around the runtime call */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember();
/*try:*/ {
/* "cuda/bindings/runtime.pyx":16046  `err = cyruntime.cudaInitDevice(device, deviceFlags, flags)` */
__pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaInitDevice(__pyx_v_device, __pyx_v_deviceFlags, __pyx_v_flags); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16046, __pyx_L4_error)
__pyx_v_err = __pyx_t_1;
}
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } }
/* "cuda/bindings/runtime.pyx":16047  `return (_dict_cudaError_t[err],)` -- map the enum through the module-level _dict_cudaError_t and build a 1-tuple */
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16047, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16047, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16047, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16047, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16047, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16009 * return (_dict_cudaError_t[err], device) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaInitDevice(int device, unsigned int deviceFlags, unsigned int flags): * """ Initialize device to be used for GPU executions. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaInitDevice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16049 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaSetDevice(int device): * """ Set device to be used for GPU executions. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_69cudaSetDevice(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_68cudaSetDevice, "cudaSetDevice(int device)\n\nSet device to be used for GPU executions.\n\nSets `device` as the current device for the calling host thread. 
Valid\ndevice id's are 0 to (:py:obj:`~.cudaGetDeviceCount()` - 1).\n\nAny device memory subsequently allocated from this host thread using\n:py:obj:`~.cudaMalloc()`, :py:obj:`~.cudaMallocPitch()` or\n:py:obj:`~.cudaMallocArray()` will be physically resident on `device`.\nAny host memory allocated from this host thread using\n:py:obj:`~.cudaMallocHost()` or :py:obj:`~.cudaHostAlloc()` or\n:py:obj:`~.cudaHostRegister()` will have its lifetime associated with\n`device`. Any streams or events created from this host thread will be\nassociated with `device`. Any kernels launched from this host thread\nusing the <<<>>> operator or :py:obj:`~.cudaLaunchKernel()` will be\nexecuted on `device`.\n\nThis call may be made from any host thread, to any device, and at any\ntime. This function will do no synchronization with the previous or new\ndevice, and should only take significant time when it initializes the\nruntime's context state. This call will bind the primary context of the\nspecified device to the calling thread and all the subsequent memory\nallocations, stream and event creations, and kernel launches will be\nassociated with the primary context. This function will also\nimmediately initialize the runtime state on the primary context, and\nthe context will be current on `device` immediately. 
This function will\nreturn an error if the device is in\n:py:obj:`~.cudaComputeModeExclusiveProcess` and is occupied by another\nprocess or if the device is in :py:obj:`~.cudaComputeModeProhibited`.\n\nIt is not required to call :py:obj:`~.cudaInitDevice` before using this\nfunction.\n\nParameters\n----------\ndevice : int\n Device on which the active host thread should execute the device\n code.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorDeviceUnavailable`,\n""\nSee Also\n--------\n:py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuCtxSetCurrent`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_69cudaSetDevice = {"cudaSetDevice", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_69cudaSetDevice, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_68cudaSetDevice}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_69cudaSetDevice(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_device; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaSetDevice (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = 
{&__pyx_mstate_global->__pyx_n_u_device_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16049, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16049, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaSetDevice", 0) < (0)) __PYX_ERR(0, 16049, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaSetDevice", 1, 1, 1, i); __PYX_ERR(0, 16049, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16049, __pyx_L3_error) } __pyx_v_device = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16050, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaSetDevice", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16049, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaSetDevice", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_68cudaSetDevice(__pyx_self, __pyx_v_device); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); 
/* NOTE(review): Cython-generated code (from cuda/bindings/runtime.pyx) -- do not hand-edit; regenerate from the .pyx source instead. */
++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of cudaSetDevice(int device): releases the GIL, calls the
 * cyruntime cudaSetDevice binding, then returns the 1-tuple
 * (_dict_cudaError_t[err],).  Corresponds to cuda/bindings/runtime.pyx:16098-16100. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_68cudaSetDevice(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_device) {
cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaSetDevice", 0);
/* "cuda/bindings/runtime.pyx":16098  `with nogil:` -- release the GIL around the runtime call */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember();
/*try:*/ {
/* "cuda/bindings/runtime.pyx":16099  `err = cyruntime.cudaSetDevice(device)` */
__pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaSetDevice(__pyx_v_device); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16099, __pyx_L4_error)
__pyx_v_err = __pyx_t_1;
}
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } }
/* "cuda/bindings/runtime.pyx":16100  `return (_dict_cudaError_t[err],)` -- map the enum through the module-level _dict_cudaError_t and build a 1-tuple */
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16100, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16100, __pyx_L1_error);
__pyx_t_4 = 0;
__pyx_r = __pyx_t_3; __pyx_t_3 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("cuda.bindings.runtime.cudaSetDevice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":16102 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetDevice(): * """ Returns which device is currently being used.
*/
/* NOTE(review): This file is machine-generated by Cython from cuda/bindings/runtime.pyx.
   Do not hand-edit; fix the .pyx source and regenerate. The comments below were added for
   review navigation only -- every code token is unchanged. */
/* Python wrapper */
/* METH_NOARGS CPython entry point for runtime.pyx cudaGetDevice(); delegates to
   __pyx_pf_4cuda_8bindings_7runtime_70cudaGetDevice below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_71cudaGetDevice(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_70cudaGetDevice, "cudaGetDevice()\n\nReturns which device is currently being used.\n\nReturns in `*device` the current device for the calling host thread.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorDeviceUnavailable`,\ndevice : int\n Returns the device on which the active host thread executes the\n device code.\n\nSee Also\n--------\n:py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cuCtxGetCurrent`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_71cudaGetDevice = {"cudaGetDevice", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_71cudaGetDevice, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_70cudaGetDevice};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_71cudaGetDevice(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
/* NOTE(review): __pyx_args/__pyx_nargs are not declared in this METH_NOARGS wrapper, so
   __Pyx_KwValues_VARARGS presumably expands to something that ignores its arguments --
   confirm in the Cython utility-code headers if this ever fails to compile. */
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetDevice (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_70cudaGetDevice(__pyx_self);
/* function exit code */
__Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of runtime.pyx cudaGetDevice(): calls cyruntime.cudaGetDevice(&device)
   with the GIL released, then returns the 2-tuple (_dict_cudaError_t[err], device), or
   (_dict_cudaError_t[err], None) when err != cudaSuccess. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_70cudaGetDevice(CYTHON_UNUSED PyObject *__pyx_self) { int __pyx_v_device; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetDevice", 0);
/* "cuda/bindings/runtime.pyx":16120 * :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cuCtxGetCurrent` * """ * cdef int device = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetDevice(&device) */
__pyx_v_device = 0;
/* "cuda/bindings/runtime.pyx":16121 * """ * cdef int device = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetDevice(&device) * if err != cyruntime.cudaSuccess: */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ {
/* "cuda/bindings/runtime.pyx":16122 * cdef int device = 0 * with nogil: * err = cyruntime.cudaGetDevice(&device) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */
__pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetDevice((&__pyx_v_device)); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16122, __pyx_L4_error) __pyx_v_err = __pyx_t_1; }
/* "cuda/bindings/runtime.pyx":16121 * """ * cdef int device = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetDevice(&device) * if err != cyruntime.cudaSuccess: */
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } }
/* "cuda/bindings/runtime.pyx":16123 * with nogil: * err = cyruntime.cudaGetDevice(&device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) */
__pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) {
/* "cuda/bindings/runtime.pyx":16124 * err = cyruntime.cudaGetDevice(&device) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], device) * */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16124, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16124, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16124, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16124, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 16124, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 16124, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":16123 * with nogil: * err = cyruntime.cudaGetDevice(&device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) */
}
/* "cuda/bindings/runtime.pyx":16125 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_int(__pyx_v_device); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 16125, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 16125, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":16102 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetDevice(): * """ Returns which device is currently being used. */
/* function exit code */
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetDevice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":16127 * return (_dict_cudaError_t[err], device) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaSetDeviceFlags(unsigned int flags): * """ Sets flags to be used for device executions. */
/* Python wrapper */
/* METH_FASTCALL|METH_KEYWORDS entry point for runtime.pyx cudaSetDeviceFlags(flags):
   parses the single 'flags' argument (positional or keyword) and delegates to
   __pyx_pf_4cuda_8bindings_7runtime_72cudaSetDeviceFlags. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_73cudaSetDeviceFlags(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_72cudaSetDeviceFlags, "cudaSetDeviceFlags(unsigned int flags)\n\nSets flags to be used for device executions.\n\nRecords `flags` as the flags for the current device. If the current\ndevice has been set and that device has already been initialized, the\nprevious flags are overwritten. If the current device has not been\ninitialized, it is initialized with the provided flags. If no device\nhas been made current to the calling thread, a default device is\nselected and initialized with the provided flags.\n\nThe three LSBs of the `flags` parameter can be used to control how the\nCPU thread interacts with the OS scheduler when waiting for results\nfrom the device.\n\n- :py:obj:`~.cudaDeviceScheduleAuto`: The default value if the `flags`\n parameter is zero, uses a heuristic based on the number of active\n CUDA contexts in the process `C` and the number of logical processors\n in the system `P`. If `C` > `P`, then CUDA will yield to other OS\n threads when waiting for the device, otherwise CUDA will not yield\n while waiting for results and actively spin on the processor.\n Additionally, on Tegra devices, :py:obj:`~.cudaDeviceScheduleAuto`\n uses a heuristic based on the power profile of the platform and may\n choose :py:obj:`~.cudaDeviceScheduleBlockingSync` for low-powered\n devices.\n\n- :py:obj:`~.cudaDeviceScheduleSpin`: Instruct CUDA to actively spin\n when waiting for results from the device. This can decrease latency\n when waiting for the device, but may lower the performance of CPU\n threads if they are performing work in parallel with the CUDA thread.\n\n- :py:obj:`~.cudaDeviceScheduleYield`: Instruct CUDA to yield its\n thread when waiting for results from the device. This can increase\n latency when waiting for the device, but can increase the performance\n of CPU threads performing work in parallel with the device.\n\n- :py:obj:`~.cudaDeviceScheduleBlockingSync`: Instruct CUDA to block\n the CPU thread on a synchronization primitive when waiting for the\n device t""o finish work.\n\n- :py:obj:`~.cudaDeviceBlockingSync`: Instruct CUDA to block the CPU\n thread on a synchronization primitive when waiting for the device to\n finish work. :py:obj:`~.Deprecated:` This flag was deprecated as of\n CUDA 4.0 and replaced with\n :py:obj:`~.cudaDeviceScheduleBlockingSync`.\n\n- :py:obj:`~.cudaDeviceMapHost`: This flag enables allocating pinned\n host memory that is accessible to the device. It is implicit for the\n runtime but may be absent if a context is created using the driver\n API. If this flag is not set, :py:obj:`~.cudaHostGetDevicePointer()`\n will always return a failure code.\n\n- :py:obj:`~.cudaDeviceLmemResizeToMax`: Instruct CUDA to not reduce\n local memory after resizing local memory for a kernel. This can\n prevent thrashing by local memory allocations when launching many\n kernels with high local memory usage at the cost of potentially\n increased memory usage. :py:obj:`~.Deprecated:` This flag is\n deprecated and the behavior enabled by this flag is now the default\n and cannot be disabled.\n\n- :py:obj:`~.cudaDeviceSyncMemops`: Ensures that synchronous memory\n operations initiated on this context will always synchronize. See\n further documentation in the section titled \"API Synchronization\n behavior\" to learn more about cases when synchronous memory\n operations can exhibit asynchronous behavior.\n\nParameters\n----------\nflags : unsigned int\n Parameters for device operation\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaGetDeviceFlags`, :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaSetValidDevices`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cuDevicePrimaryCtxSetFlags`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_73cudaSetDeviceFlags = {"cudaSetDeviceFlags", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_73cudaSetDeviceFlags, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_72cudaSetDeviceFlags};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_73cudaSetDeviceFlags(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { unsigned int __pyx_v_flags;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaSetDeviceFlags (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_flags_2,0};
/* NOTE(review): 'unlikely(__pyx_kwds_len) < 0' puts the branch hint on the value rather
   than the comparison; semantically identical, but the misplaced hint is a generated-code
   quirk that would need fixing in Cython itself, not here. */
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16127, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16127, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaSetDeviceFlags", 0) < (0)) __PYX_ERR(0, 16127, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaSetDeviceFlags", 1, 1, 1, i); __PYX_ERR(0, 16127, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16127, __pyx_L3_error) } __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16128, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaSetDeviceFlags", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16127, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaSetDeviceFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_72cudaSetDeviceFlags(__pyx_self, __pyx_v_flags);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of runtime.pyx cudaSetDeviceFlags(flags): calls
   cyruntime.cudaSetDeviceFlags(flags) with the GIL released and returns the
   1-tuple (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_72cudaSetDeviceFlags(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_flags) { cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaSetDeviceFlags", 0);
/* "cuda/bindings/runtime.pyx":16207 * :py:obj:`~.cudaGetDeviceFlags`, :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaSetValidDevices`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cuDevicePrimaryCtxSetFlags` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaSetDeviceFlags(flags) * return (_dict_cudaError_t[err],) */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ {
/* "cuda/bindings/runtime.pyx":16208 * """ * with nogil: * err = cyruntime.cudaSetDeviceFlags(flags) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */
__pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaSetDeviceFlags(__pyx_v_flags); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16208, __pyx_L4_error) __pyx_v_err = __pyx_t_1; }
/* "cuda/bindings/runtime.pyx":16207 * :py:obj:`~.cudaGetDeviceFlags`, :py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaSetValidDevices`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cuDevicePrimaryCtxSetFlags` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaSetDeviceFlags(flags) * return (_dict_cudaError_t[err],) */
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } }
/* "cuda/bindings/runtime.pyx":16209 * with nogil: * err = cyruntime.cudaSetDeviceFlags(flags) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16209, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":16127 * return (_dict_cudaError_t[err], device) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaSetDeviceFlags(unsigned int flags): * """ Sets flags to be used for device executions. */
/* function exit code */
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaSetDeviceFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":16211 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetDeviceFlags(): * """ Gets the flags for the current device.
*/
/* NOTE(review): Cython-generated wrappers for cudaGetDeviceFlags() and cudaStreamCreate();
   do not hand-edit -- change cuda/bindings/runtime.pyx and regenerate. Comments added for
   navigation only; all code tokens unchanged. */
/* Python wrapper */
/* METH_NOARGS entry point for runtime.pyx cudaGetDeviceFlags(); delegates to
   __pyx_pf_4cuda_8bindings_7runtime_74cudaGetDeviceFlags below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_75cudaGetDeviceFlags(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_74cudaGetDeviceFlags, "cudaGetDeviceFlags()\n\nGets the flags for the current device.\n\nReturns in `flags` the flags for the current device. If there is a\ncurrent device for the calling thread, the flags for the device are\nreturned. If there is no current device, the flags for the first device\nare returned, which may be the default flags. Compare to the behavior\nof :py:obj:`~.cudaSetDeviceFlags`.\n\nTypically, the flags returned should match the behavior that will be\nseen if the calling thread uses a device after this call, without any\nchange to the flags or current device inbetween by this or another\nthread. Note that if the device is not initialized, it is possible for\nanother thread to change the flags for the current device before it is\ninitialized. Additionally, when using exclusive mode, if this thread\nhas not requested a specific device, it may use a device other than the\nfirst device, contrary to the assumption made by this function.\n\nIf a context has been created via the driver API and is current to the\ncalling thread, the flags for that context are always returned.\n\nFlags returned by this function may specifically include\n:py:obj:`~.cudaDeviceMapHost` even though it is not accepted by\n:py:obj:`~.cudaSetDeviceFlags` because it is implicit in runtime API\nflags. The reason for this is that the current context may have been\ncreated via the driver API in which case the flag is not implicit and\nmay be unset.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`\nflags : unsigned int\n Pointer to store the device flags\n\nSee Also\n--------\n:py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaSetDeviceFlags`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuDevicePrimaryCtxGetState`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_75cudaGetDeviceFlags = {"cudaGetDeviceFlags", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_75cudaGetDeviceFlags, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_74cudaGetDeviceFlags};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_75cudaGetDeviceFlags(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) {
/* NOTE(review): as in the other METH_NOARGS wrappers here, __pyx_args/__pyx_nargs are not
   declared -- __Pyx_KwValues_VARARGS presumably ignores its arguments; confirm in the
   Cython utility headers. */
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetDeviceFlags (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_74cudaGetDeviceFlags(__pyx_self);
/* function exit code */
__Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of runtime.pyx cudaGetDeviceFlags(): calls
   cyruntime.cudaGetDeviceFlags(&flags) with the GIL released and returns
   (_dict_cudaError_t[err], flags), or (_dict_cudaError_t[err], None) on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_74cudaGetDeviceFlags(CYTHON_UNUSED PyObject *__pyx_self) { unsigned int __pyx_v_flags; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetDeviceFlags", 0);
/* "cuda/bindings/runtime.pyx":16251 * :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaGetDeviceProperties`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaSetDeviceFlags`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuCtxGetFlags`, :py:obj:`~.cuDevicePrimaryCtxGetState` * """ * cdef unsigned int flags = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetDeviceFlags(&flags) */
__pyx_v_flags = 0;
/* "cuda/bindings/runtime.pyx":16252 * """ * cdef unsigned int flags = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetDeviceFlags(&flags) * if err != cyruntime.cudaSuccess: */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ {
/* "cuda/bindings/runtime.pyx":16253 * cdef unsigned int flags = 0 * with nogil: * err = cyruntime.cudaGetDeviceFlags(&flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */
__pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetDeviceFlags((&__pyx_v_flags)); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16253, __pyx_L4_error) __pyx_v_err = __pyx_t_1; }
/* "cuda/bindings/runtime.pyx":16252 * """ * cdef unsigned int flags = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetDeviceFlags(&flags) * if err != cyruntime.cudaSuccess: */
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } }
/* "cuda/bindings/runtime.pyx":16254 * with nogil: * err = cyruntime.cudaGetDeviceFlags(&flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], flags) */
__pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) {
/* "cuda/bindings/runtime.pyx":16255 * err = cyruntime.cudaGetDeviceFlags(&flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], flags) * */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 16255, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 16255, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":16254 * with nogil: * err = cyruntime.cudaGetDeviceFlags(&flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], flags) */
}
/* "cuda/bindings/runtime.pyx":16256 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], flags) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_unsigned_int(__pyx_v_flags); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 16256, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 16256, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":16211 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetDeviceFlags(): * """ Gets the flags for the current device. */
/* function exit code */
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetDeviceFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":16258 * return (_dict_cudaError_t[err], flags) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamCreate(): * """ Create an asynchronous stream. */
/* Python wrapper */
/* METH_NOARGS entry point for runtime.pyx cudaStreamCreate(); delegates to
   __pyx_pf_4cuda_8bindings_7runtime_76cudaStreamCreate below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_77cudaStreamCreate(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_76cudaStreamCreate, "cudaStreamCreate()\n\nCreate an asynchronous stream.\n\nCreates a new asynchronous stream on the context that is current to the\ncalling host thread. If no context is current to the calling host\nthread, then the primary context for a device is selected, made current\nto the calling thread, and initialized before creating a stream on it.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npStream : :py:obj:`~.cudaStream_t`\n Pointer to new stream identifier\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cudaStreamGetDevice`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamCreate`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_77cudaStreamCreate = {"cudaStreamCreate", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_77cudaStreamCreate, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_76cudaStreamCreate};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_77cudaStreamCreate(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamCreate (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_76cudaStreamCreate(__pyx_self);
/* function exit code */
__Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of runtime.pyx cudaStreamCreate(): instantiates the Python-level
   cudaStream_t wrapper object, calls cyruntime.cudaStreamCreate(pStream._pvt_ptr) with
   the GIL released, and returns (_dict_cudaError_t[err], pStream) -- (err, None) on
   failure. (Exit-code/cleanup section of this function continues past this review span.) */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_76cudaStreamCreate(CYTHON_UNUSED PyObject *__pyx_self) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaStream_t *__pyx_v_pStream = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamCreate", 0);
/* "cuda/bindings/runtime.pyx":16278 * :py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cudaStreamGetDevice`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamCreate` * """ * cdef cudaStream_t pStream = cudaStream_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamCreate(pStream._pvt_ptr) */
__pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16278, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pStream = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaStream_t *)__pyx_t_1); __pyx_t_1 = 0;
/* "cuda/bindings/runtime.pyx":16279 * """ * cdef cudaStream_t pStream = cudaStream_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamCreate(pStream._pvt_ptr) * if err != cyruntime.cudaSuccess: */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ {
/* "cuda/bindings/runtime.pyx":16280 * cdef cudaStream_t pStream = cudaStream_t() * with nogil: * err = cyruntime.cudaStreamCreate(pStream._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */
__pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamCreate(((cudaStream_t *)__pyx_v_pStream->__pyx_base._pvt_ptr)); if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16280, __pyx_L4_error) __pyx_v_err = __pyx_t_5; }
/* "cuda/bindings/runtime.pyx":16279 * """ * cdef cudaStream_t pStream = cudaStream_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamCreate(pStream._pvt_ptr) * if err != cyruntime.cudaSuccess: */
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } }
/* "cuda/bindings/runtime.pyx":16281 * with nogil: * err = cyruntime.cudaStreamCreate(pStream._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pStream) */
__pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) {
/* "cuda/bindings/runtime.pyx":16282 * err = cyruntime.cudaStreamCreate(pStream._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pStream) * */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16282, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 16282, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 16282, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":16281 * with nogil: * err = cyruntime.cudaStreamCreate(pStream._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pStream) */
}
/* "cuda/bindings/runtime.pyx":16283 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pStream) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16283, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 16283, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pStream); __Pyx_GIVEREF((PyObject *)__pyx_v_pStream); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pStream)) != (0)) __PYX_ERR(0, 16283, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":16258 * return (_dict_cudaError_t[err], flags) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamCreate(): * """ Create an asynchronous stream.
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamCreate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pStream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16285 * return (_dict_cudaError_t[err], pStream) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamCreateWithFlags(unsigned int flags): * """ Create an asynchronous stream. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_79cudaStreamCreateWithFlags(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_78cudaStreamCreateWithFlags, "cudaStreamCreateWithFlags(unsigned int flags)\n\nCreate an asynchronous stream.\n\nCreates a new asynchronous stream on the context that is current to the\ncalling host thread. If no context is current to the calling host\nthread, then the primary context for a device is selected, made current\nto the calling thread, and initialized before creating a stream on it.\nThe `flags` argument determines the behaviors of the stream. 
Valid\nvalues for `flags` are\n\n- :py:obj:`~.cudaStreamDefault`: Default stream creation flag.\n\n- :py:obj:`~.cudaStreamNonBlocking`: Specifies that work running in the\n created stream may run concurrently with work in stream 0 (the NULL\n stream), and that the created stream should perform no implicit\n synchronization with stream 0.\n\nParameters\n----------\nflags : unsigned int\n Parameters for stream creation\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npStream : :py:obj:`~.cudaStream_t`\n Pointer to new stream identifier\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cudaStreamGetDevice`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamCreate`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_79cudaStreamCreateWithFlags = {"cudaStreamCreateWithFlags", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_79cudaStreamCreateWithFlags, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_78cudaStreamCreateWithFlags}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_79cudaStreamCreateWithFlags(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamCreateWithFlags (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE 
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16285, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16285, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamCreateWithFlags", 0) < (0)) __PYX_ERR(0, 16285, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamCreateWithFlags", 1, 1, 1, i); __PYX_ERR(0, 16285, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16285, __pyx_L3_error) } __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16286, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamCreateWithFlags", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16285, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamCreateWithFlags", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_78cudaStreamCreateWithFlags(__pyx_self, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_78cudaStreamCreateWithFlags(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_flags) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaStream_t *__pyx_v_pStream = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamCreateWithFlags", 0); /* "cuda/bindings/runtime.pyx":16319 * :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cudaStreamGetDevice`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamCreate` * """ * cdef cudaStream_t pStream = cudaStream_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamCreateWithFlags(pStream._pvt_ptr, flags) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); 
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16319, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pStream = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaStream_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":16320 * """ * cdef cudaStream_t pStream = cudaStream_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamCreateWithFlags(pStream._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16321 * cdef cudaStream_t pStream = cudaStream_t() * with nogil: * err = cyruntime.cudaStreamCreateWithFlags(pStream._pvt_ptr, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamCreateWithFlags(((cudaStream_t *)__pyx_v_pStream->__pyx_base._pvt_ptr), __pyx_v_flags); if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16321, __pyx_L4_error) __pyx_v_err = __pyx_t_5; } /* "cuda/bindings/runtime.pyx":16320 * """ * cdef cudaStream_t pStream = cudaStream_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamCreateWithFlags(pStream._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":16322 * with nogil: * err = cyruntime.cudaStreamCreateWithFlags(pStream._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pStream) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":16323 * err = 
cyruntime.cudaStreamCreateWithFlags(pStream._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pStream) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16323, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16323, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16323, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16323, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 16323, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 16323, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16322 * with nogil: * err = cyruntime.cudaStreamCreateWithFlags(pStream._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pStream) */ } /* "cuda/bindings/runtime.pyx":16324 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pStream) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16324, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16324, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16324, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16324, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 16324, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pStream); __Pyx_GIVEREF((PyObject *)__pyx_v_pStream); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pStream)) != (0)) __PYX_ERR(0, 16324, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16285 * return (_dict_cudaError_t[err], pStream) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamCreateWithFlags(unsigned int flags): * """ Create an asynchronous stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamCreateWithFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pStream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16326 * return (_dict_cudaError_t[err], pStream) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamCreateWithPriority(unsigned int flags, int priority): * """ Create an asynchronous stream with the specified priority. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_81cudaStreamCreateWithPriority(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_80cudaStreamCreateWithPriority, "cudaStreamCreateWithPriority(unsigned int flags, int priority)\n\nCreate an asynchronous stream with the specified priority.\n\nCreates a stream with the specified priority and returns a handle in\n`pStream`. The stream is created on the context that is current to the\ncalling host thread. If no context is current to the calling host\nthread, then the primary context for a device is selected, made current\nto the calling thread, and initialized before creating a stream on it.\nThis affects the scheduling priority of work in the stream. Priorities\nprovide a hint to preferentially run work with higher priority when\npossible, but do not preempt already-running work or provide any other\nfunctional guarantee on execution order.\n\n`priority` follows a convention where lower numbers represent higher\npriorities. '0' represents default priority. The range of meaningful\nnumerical priorities can be queried using\n:py:obj:`~.cudaDeviceGetStreamPriorityRange`. If the specified priority\nis outside the numerical range returned by\n:py:obj:`~.cudaDeviceGetStreamPriorityRange`, it will automatically be\nclamped to the lowest or the highest number in the range.\n\nParameters\n----------\nflags : unsigned int\n Flags for stream creation. See\n :py:obj:`~.cudaStreamCreateWithFlags` for a list of valid flags\n that can be passed\npriority : int\n Priority of the stream. 
Lower numbers represent higher priorities.\n See :py:obj:`~.cudaDeviceGetStreamPriorityRange` for more\n information about the meaningful stream priorities that can be\n passed.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npStream : :py:obj:`~.cudaStream_t`\n Pointer to new stream identifier\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaDeviceGetStreamPriorityRange`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaS""treamAddCallback`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamCreateWithPriority`\n\nNotes\n-----\nStream priorities are supported only on GPUs with compute capability 3.5 or higher.\n\nIn the current implementation, only compute kernels launched in priority streams are affected by the stream's priority. Stream priorities have no effect on host-to-device and device-to-host memory operations."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_81cudaStreamCreateWithPriority = {"cudaStreamCreateWithPriority", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_81cudaStreamCreateWithPriority, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_80cudaStreamCreateWithPriority}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_81cudaStreamCreateWithPriority(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { unsigned int __pyx_v_flags; int __pyx_v_priority; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamCreateWithPriority (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_flags_2,&__pyx_mstate_global->__pyx_n_u_priority_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16326, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16326, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16326, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamCreateWithPriority", 0) < (0)) __PYX_ERR(0, 16326, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamCreateWithPriority", 1, 2, 2, i); __PYX_ERR(0, 16326, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16326, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16326, __pyx_L3_error) } __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && 
PyErr_Occurred())) __PYX_ERR(0, 16327, __pyx_L3_error) __pyx_v_priority = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_priority == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16327, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamCreateWithPriority", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 16326, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamCreateWithPriority", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_80cudaStreamCreateWithPriority(__pyx_self, __pyx_v_flags, __pyx_v_priority); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_80cudaStreamCreateWithPriority(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_flags, int __pyx_v_priority) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaStream_t *__pyx_v_pStream = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamCreateWithPriority", 0); /* "cuda/bindings/runtime.pyx":16377 * In the current implementation, only compute kernels launched in priority streams are affected by the stream's priority. Stream priorities have no effect on host-to-device and device-to-host memory operations. 
* """ * cdef cudaStream_t pStream = cudaStream_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamCreateWithPriority(pStream._pvt_ptr, flags, priority) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16377, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pStream = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaStream_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":16378 * """ * cdef cudaStream_t pStream = cudaStream_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamCreateWithPriority(pStream._pvt_ptr, flags, priority) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16379 * cdef cudaStream_t pStream = cudaStream_t() * with nogil: * err = cyruntime.cudaStreamCreateWithPriority(pStream._pvt_ptr, flags, priority) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamCreateWithPriority(((cudaStream_t *)__pyx_v_pStream->__pyx_base._pvt_ptr), __pyx_v_flags, __pyx_v_priority); if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16379, __pyx_L4_error) __pyx_v_err = __pyx_t_5; } /* "cuda/bindings/runtime.pyx":16378 * """ * cdef cudaStream_t pStream = cudaStream_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamCreateWithPriority(pStream._pvt_ptr, 
flags, priority) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":16380 * with nogil: * err = cyruntime.cudaStreamCreateWithPriority(pStream._pvt_ptr, flags, priority) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pStream) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":16381 * err = cyruntime.cudaStreamCreateWithPriority(pStream._pvt_ptr, flags, priority) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pStream) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 16381, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 16381, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16380 * with nogil: * err = cyruntime.cudaStreamCreateWithPriority(pStream._pvt_ptr, flags, priority) * if err != 
cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pStream) */ } /* "cuda/bindings/runtime.pyx":16382 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pStream) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16382, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 16382, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pStream); __Pyx_GIVEREF((PyObject *)__pyx_v_pStream); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pStream)) != (0)) __PYX_ERR(0, 16382, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16326 * return (_dict_cudaError_t[err], pStream) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamCreateWithPriority(unsigned int flags, int priority): * """ Create an asynchronous stream with the specified priority. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamCreateWithPriority", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pStream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16384 * return (_dict_cudaError_t[err], pStream) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetPriority(hStream): * """ Query the priority of a stream. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_83cudaStreamGetPriority(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_82cudaStreamGetPriority, "cudaStreamGetPriority(hStream)\n\nQuery the priority of a stream.\n\nQuery the priority of a stream. The priority is returned in in\n`priority`. Note that if the stream was created with a priority outside\nthe meaningful numerical range returned by\n:py:obj:`~.cudaDeviceGetStreamPriorityRange`, this function returns the\nclamped priority. 
See :py:obj:`~.cudaStreamCreateWithPriority` for\ndetails about priority clamping.\n\nParameters\n----------\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Handle to the stream to be queried\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\npriority : int\n Pointer to a signed integer in which the stream's priority is\n returned\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaDeviceGetStreamPriorityRange`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cudaStreamGetDevice`, :py:obj:`~.cuStreamGetPriority`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_83cudaStreamGetPriority = {"cudaStreamGetPriority", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_83cudaStreamGetPriority, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_82cudaStreamGetPriority}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_83cudaStreamGetPriority(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_hStream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamGetPriority (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hStream,0}; const Py_ssize_t __pyx_kwds_len = 
(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16384, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16384, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamGetPriority", 0) < (0)) __PYX_ERR(0, 16384, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamGetPriority", 1, 1, 1, i); __PYX_ERR(0, 16384, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16384, __pyx_L3_error) } __pyx_v_hStream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamGetPriority", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16384, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetPriority", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_82cudaStreamGetPriority(__pyx_self, __pyx_v_hStream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_82cudaStreamGetPriority(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hStream) { cudaStream_t __pyx_v_cyhStream; PyObject *__pyx_v_phStream = NULL; int __pyx_v_priority; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamGetPriority", 0); /* "cuda/bindings/runtime.pyx":16413 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_hStream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16414 * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: * phStream = 0 # <<<<<<<<<<<<<< * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phStream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16413 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16415 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) 
{ /* "cuda/bindings/runtime.pyx":16416 * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) # <<<<<<<<<<<<<< * else: * phStream = int(cudaStream_t(hStream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hStream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phStream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16415 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16418 * phStream = int(hStream) * else: * phStream = int(cudaStream_t(hStream)) # <<<<<<<<<<<<<< * cyhStream = phStream * cdef int priority = 0 */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hStream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16418, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phStream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16419 * else: * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream # <<<<<<<<<<<<<< * cdef int priority = 0 * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phStream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 
16419, __pyx_L1_error) __pyx_v_cyhStream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16420 * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream * cdef int priority = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamGetPriority(cyhStream, &priority) */ __pyx_v_priority = 0; /* "cuda/bindings/runtime.pyx":16421 * cyhStream = phStream * cdef int priority = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetPriority(cyhStream, &priority) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16422 * cdef int priority = 0 * with nogil: * err = cyruntime.cudaStreamGetPriority(cyhStream, &priority) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamGetPriority(__pyx_v_cyhStream, (&__pyx_v_priority)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16422, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16421 * cyhStream = phStream * cdef int priority = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetPriority(cyhStream, &priority) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":16423 * with nogil: * err = cyruntime.cudaStreamGetPriority(cyhStream, &priority) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], priority) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16424 * err = cyruntime.cudaStreamGetPriority(cyhStream, &priority) * if err 
!= cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], priority) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16424, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16424, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16424, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16424, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16424, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 16424, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16423 * with nogil: * err = cyruntime.cudaStreamGetPriority(cyhStream, &priority) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], priority) */ } /* "cuda/bindings/runtime.pyx":16425 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], priority) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 
__Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_From_int(__pyx_v_priority); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 16425, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 16425, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16384 * return (_dict_cudaError_t[err], pStream) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetPriority(hStream): * """ Query the priority of a stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetPriority", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phStream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16427 * return (_dict_cudaError_t[err], priority) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetFlags(hStream): * """ Query the flags of a stream. 
*/
/*
 * cudaStreamGetFlags — Cython-generated Python binding for
 * cyruntime.cudaStreamGetFlags (source: cuda/bindings/runtime.pyx:16427).
 *
 * NOTE(review): this translation unit is machine-generated by Cython.
 * Do not edit this C by hand — fix the .pyx source and regenerate.
 *
 * The __pyx_pw_* wrapper below parses exactly one argument, `hStream`
 * (positional or keyword), raising TypeError via __Pyx_RaiseArgtupleInvalid
 * on arity errors, then dispatches to the __pyx_pf_* implementation.
 */
/* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_85cudaStreamGetFlags(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_84cudaStreamGetFlags, "cudaStreamGetFlags(hStream)\n\nQuery the flags of a stream.\n\nQuery the flags of a stream. The flags are returned in `flags`. See\n:py:obj:`~.cudaStreamCreateWithFlags` for a list of valid flags.\n\nParameters\n----------\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Handle to the stream to be queried\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\nflags : unsigned int\n Pointer to an unsigned integer in which the stream's flags are\n returned\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cudaStreamGetDevice`, :py:obj:`~.cuStreamGetFlags`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_85cudaStreamGetFlags = {"cudaStreamGetFlags", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_85cudaStreamGetFlags, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_84cudaStreamGetFlags};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_85cudaStreamGetFlags(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_hStream = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamGetFlags (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hStream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16427, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16427, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamGetFlags", 0) < (0)) __PYX_ERR(0, 16427, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamGetFlags", 1, 1, 1, i); __PYX_ERR(0, 16427, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16427, __pyx_L3_error) } __pyx_v_hStream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamGetFlags", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16427, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_84cudaStreamGetFlags(__pyx_self, __pyx_v_hStream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/*
 * Implementation: converts `hStream` to a cyruntime.cudaStream_t handle
 * (None -> 0; cudaStream_t / driver.CUstream -> int(hStream); any other value
 * is first wrapped as cudaStream_t(hStream) and then converted with int()),
 * releases the GIL around the cyruntime.cudaStreamGetFlags call, and returns
 * the 2-tuple (_dict_cudaError_t[err], flags); the second element is None
 * when err != cudaSuccess.
 */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_84cudaStreamGetFlags(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hStream) { cudaStream_t __pyx_v_cyhStream; PyObject *__pyx_v_phStream = NULL; unsigned int __pyx_v_flags; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamGetFlags", 0); /* "cuda/bindings/runtime.pyx":16452 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_hStream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16453 * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: * phStream = 0 # <<<<<<<<<<<<<< * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phStream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16452 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16454 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream
= int(hStream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16455 * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) # <<<<<<<<<<<<<< * else: * phStream = int(cudaStream_t(hStream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hStream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16455, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phStream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16454 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16457 * phStream = int(hStream) * else: * phStream = int(cudaStream_t(hStream)) # <<<<<<<<<<<<<< * cyhStream = phStream * cdef unsigned int flags = 0 */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hStream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16457, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16457, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject
*)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phStream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16458 * else: * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream # <<<<<<<<<<<<<< * cdef unsigned int flags = 0 * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phStream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16458, __pyx_L1_error) __pyx_v_cyhStream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16459 * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream * cdef unsigned int flags = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamGetFlags(cyhStream, &flags) */ __pyx_v_flags = 0; /* "cuda/bindings/runtime.pyx":16460 * cyhStream = phStream * cdef unsigned int flags = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetFlags(cyhStream, &flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16461 * cdef unsigned int flags = 0 * with nogil: * err = cyruntime.cudaStreamGetFlags(cyhStream, &flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamGetFlags(__pyx_v_cyhStream, (&__pyx_v_flags)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16461, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16460 * cyhStream = phStream * cdef unsigned int flags = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetFlags(cyhStream, &flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; }
} /* "cuda/bindings/runtime.pyx":16462 * with nogil: * err = cyruntime.cudaStreamGetFlags(cyhStream, &flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], flags) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16463 * err = cyruntime.cudaStreamGetFlags(cyhStream, &flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], flags) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16463, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16463, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 16463, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16462 * with nogil: * err = cyruntime.cudaStreamGetFlags(cyhStream, &flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], flags) */ } /* "cuda/bindings/runtime.pyx":16464 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], flags) # <<<<<<<<<<<<<< * * @cython.embedsignature(True)
*/ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_From_unsigned_int(__pyx_v_flags); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16464, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 16464, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 16464, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16427 * return (_dict_cudaError_t[err], priority) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetFlags(hStream): * """ Query the flags of a stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phStream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16466 * return (_dict_cudaError_t[err], flags) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetId(hStream): * """ Query the Id of a stream.
*/
/*
 * cudaStreamGetId — Cython-generated Python binding for
 * cyruntime.cudaStreamGetId (source: cuda/bindings/runtime.pyx:16466).
 *
 * NOTE(review): this translation unit is machine-generated by Cython.
 * Do not edit this C by hand — fix the .pyx source and regenerate.
 *
 * The __pyx_pw_* wrapper below parses exactly one argument, `hStream`
 * (positional or keyword), raising TypeError via __Pyx_RaiseArgtupleInvalid
 * on arity errors, then dispatches to the __pyx_pf_* implementation.
 */
/* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_87cudaStreamGetId(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_86cudaStreamGetId, "cudaStreamGetId(hStream)\n\nQuery the Id of a stream.\n\nQuery the Id of a stream. The Id is returned in `streamId`. The Id is\nunique for the life of the program.\n\nThe stream handle `hStream` can refer to any of the following:\n\n- a stream created via any of the CUDA runtime APIs such as\n :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`\n and :py:obj:`~.cudaStreamCreateWithPriority`, or their driver API\n equivalents such as :py:obj:`~.cuStreamCreate` or\n :py:obj:`~.cuStreamCreateWithPriority`. Passing an invalid handle\n will result in undefined behavior.\n\n- any of the special streams such as the NULL stream,\n :py:obj:`~.cudaStreamLegacy` and :py:obj:`~.cudaStreamPerThread`\n respectively. The driver API equivalents of these are also accepted\n which are NULL, :py:obj:`~.CU_STREAM_LEGACY` and\n :py:obj:`~.CU_STREAM_PER_THREAD`.\n\nParameters\n----------\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Handle to the stream to be queried\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\nstreamId : unsigned long long\n Pointer to an unsigned long long in which the stream Id is returned\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreateWithPriority`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cuStreamGetId`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_87cudaStreamGetId = {"cudaStreamGetId", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_87cudaStreamGetId, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_86cudaStreamGetId};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_87cudaStreamGetId(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_hStream = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamGetId (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hStream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16466, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16466, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamGetId", 0) < (0)) __PYX_ERR(0, 16466, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamGetId", 1, 1, 1, i); __PYX_ERR(0, 16466, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16466, __pyx_L3_error) } __pyx_v_hStream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamGetId", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16466, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetId", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_86cudaStreamGetId(__pyx_self, __pyx_v_hStream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/*
 * Implementation: converts `hStream` to a cyruntime.cudaStream_t handle
 * (None -> 0; cudaStream_t / driver.CUstream -> int(hStream); any other value
 * is first wrapped as cudaStream_t(hStream) and then converted with int()),
 * releases the GIL around the cyruntime.cudaStreamGetId call, and returns
 * the 2-tuple (_dict_cudaError_t[err], streamId); the second element is None
 * when err != cudaSuccess.
 */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_86cudaStreamGetId(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hStream) { cudaStream_t __pyx_v_cyhStream; PyObject *__pyx_v_phStream = NULL; unsigned PY_LONG_LONG __pyx_v_streamId; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamGetId", 0); /* "cuda/bindings/runtime.pyx":16505 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_hStream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16506 * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: * phStream = 0 # <<<<<<<<<<<<<< * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phStream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16505 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16507 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if
(__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16508 * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) # <<<<<<<<<<<<<< * else: * phStream = int(cudaStream_t(hStream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hStream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phStream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16507 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16510 * phStream = int(hStream) * else: * phStream = int(cudaStream_t(hStream)) # <<<<<<<<<<<<<< * cyhStream = phStream * cdef unsigned long long streamId = 0 */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hStream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16510, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phStream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16511 * else: * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream # <<<<<<<<<<<<<< * cdef unsigned long long streamId = 0 * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phStream); if (unlikely((__pyx_t_7 == (unsigned
PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16511, __pyx_L1_error) __pyx_v_cyhStream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16512 * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream * cdef unsigned long long streamId = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamGetId(cyhStream, &streamId) */ __pyx_v_streamId = 0; /* "cuda/bindings/runtime.pyx":16513 * cyhStream = phStream * cdef unsigned long long streamId = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetId(cyhStream, &streamId) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16514 * cdef unsigned long long streamId = 0 * with nogil: * err = cyruntime.cudaStreamGetId(cyhStream, &streamId) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamGetId(__pyx_v_cyhStream, (&__pyx_v_streamId)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16514, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16513 * cyhStream = phStream * cdef unsigned long long streamId = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetId(cyhStream, &streamId) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":16515 * with nogil: * err = cyruntime.cudaStreamGetId(cyhStream, &streamId) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], streamId) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /*
"cuda/bindings/runtime.pyx":16516 * err = cyruntime.cudaStreamGetId(cyhStream, &streamId) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], streamId) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16516, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 16516, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16515 * with nogil: * err = cyruntime.cudaStreamGetId(cyhStream, &streamId) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], streamId) */ } /* "cuda/bindings/runtime.pyx":16517 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], streamId) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4))
__PYX_ERR(0, 16517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_streamId); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 16517, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 16517, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16466 * return (_dict_cudaError_t[err], flags) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetId(hStream): * """ Query the Id of a stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetId", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phStream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16519 * return (_dict_cudaError_t[err], streamId) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetDevice(hStream): * """ Query the device of a stream.
*/ /* NOTE(review): Machine-generated by Cython from cuda/bindings/runtime.pyx:16519 — do not hand-edit; change the .pyx and regenerate. This unit declares the fastcall wrapper, docstring and PyMethodDef for cudaStreamGetDevice(hStream). */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_89cudaStreamGetDevice(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_88cudaStreamGetDevice, "cudaStreamGetDevice(hStream)\n\nQuery the device of a stream.\n\nReturns in `*device` the device of the stream.\n\nParameters\n----------\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Handle to the stream to be queried\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorDeviceUnavailable`,\ndevice : int\n Returns the device to which the stream belongs\n\nSee Also\n--------\n:py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamGetPriority`, :py:obj:`~.cudaStreamGetFlags`, :py:obj:`~.cuStreamGetId`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_89cudaStreamGetDevice = {"cudaStreamGetDevice", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_89cudaStreamGetDevice, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_88cudaStreamGetDevice}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_89cudaStreamGetDevice(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_hStream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamGetDevice (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE 
/* Argument unpacking: accepts exactly one positional or keyword argument ("hStream"); all error paths release any owned argument refs and return NULL with a traceback entry. */ __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hStream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16519, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16519, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamGetDevice", 0) < (0)) __PYX_ERR(0, 16519, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamGetDevice", 1, 1, 1, i); __PYX_ERR(0, 16519, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16519, __pyx_L3_error) } __pyx_v_hStream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamGetDevice", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16519, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetDevice", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_88cudaStreamGetDevice(__pyx_self, __pyx_v_hStream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: coerce hStream (None -> 0; cudaStream_t/driver.CUstream -> int(hStream); anything else -> int(cudaStream_t(hStream))) into an unsigned integer stream handle, call cyruntime.cudaStreamGetDevice with the GIL released, and return the 2-tuple (_dict_cudaError_t[err], device) — device is None on failure. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_88cudaStreamGetDevice(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hStream) { cudaStream_t __pyx_v_cyhStream; PyObject *__pyx_v_phStream = NULL; int __pyx_v_device; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamGetDevice", 0); /* "cuda/bindings/runtime.pyx":16542 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_hStream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16543 * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: * phStream = 0 # <<<<<<<<<<<<<< * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phStream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16542 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16544 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, 
__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16545 * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) # <<<<<<<<<<<<<< * else: * phStream = int(cudaStream_t(hStream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hStream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16545, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phStream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16544 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16547 * phStream = int(hStream) * else: * phStream = int(cudaStream_t(hStream)) # <<<<<<<<<<<<<< * cyhStream = phStream * cdef int device = 0 */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hStream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16547, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16547, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phStream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } 
/* Convert the Python int handle to a cudaStream_t and invoke the runtime call inside a nogil (Py_UNBLOCK_THREADS) region. */ __pyx_L3:; /* "cuda/bindings/runtime.pyx":16548 * else: * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream # <<<<<<<<<<<<<< * cdef int device = 0 * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phStream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16548, __pyx_L1_error) __pyx_v_cyhStream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16549 * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream * cdef int device = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamGetDevice(cyhStream, &device) */ __pyx_v_device = 0; /* "cuda/bindings/runtime.pyx":16550 * cyhStream = phStream * cdef int device = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetDevice(cyhStream, &device) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16551 * cdef int device = 0 * with nogil: * err = cyruntime.cudaStreamGetDevice(cyhStream, &device) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamGetDevice(__pyx_v_cyhStream, (&__pyx_v_device)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16551, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16550 * cyhStream = phStream * cdef int device = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetDevice(cyhStream, &device) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":16552 * with nogil: * err = cyruntime.cudaStreamGetDevice(cyhStream, &device) * if 
err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16553 * err = cyruntime.cudaStreamGetDevice(cyhStream, &device) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], device) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16553, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16553, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16553, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16553, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16553, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 16553, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16552 * with nogil: * err = cyruntime.cudaStreamGetDevice(cyhStream, &device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) */ } /* "cuda/bindings/runtime.pyx":16554 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], device) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, 
__pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_From_int(__pyx_v_device); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 16554, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 16554, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16519 * return (_dict_cudaError_t[err], streamId) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetDevice(hStream): * """ Query the device of a stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetDevice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phStream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16556 * return (_dict_cudaError_t[err], device) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaCtxResetPersistingL2Cache(): * """ Resets all persisting lines in cache to normal status. 
*/ /* NOTE(review): Machine-generated by Cython from cuda/bindings/runtime.pyx:16556 — do not hand-edit; regenerate from the .pyx. METH_NOARGS wrapper + implementation for cudaCtxResetPersistingL2Cache(). */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_91cudaCtxResetPersistingL2Cache(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_90cudaCtxResetPersistingL2Cache, "cudaCtxResetPersistingL2Cache()\n\nResets all persisting lines in cache to normal status.\n\nResets all persisting lines in cache to normal status. Takes effect on\nfunction return.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`,\n\nSee Also\n--------\n:py:obj:`~.cudaAccessPolicyWindow`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_91cudaCtxResetPersistingL2Cache = {"cudaCtxResetPersistingL2Cache", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_91cudaCtxResetPersistingL2Cache, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_90cudaCtxResetPersistingL2Cache}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_91cudaCtxResetPersistingL2Cache(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaCtxResetPersistingL2Cache (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_90cudaCtxResetPersistingL2Cache(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: call cyruntime.cudaCtxResetPersistingL2Cache with the GIL released and return the 1-tuple (_dict_cudaError_t[err],). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_90cudaCtxResetPersistingL2Cache(CYTHON_UNUSED PyObject *__pyx_self) { cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaCtxResetPersistingL2Cache", 0); /* "cuda/bindings/runtime.pyx":16572 * :py:obj:`~.cudaAccessPolicyWindow` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaCtxResetPersistingL2Cache() * 
return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16573 * """ * with nogil: * err = cyruntime.cudaCtxResetPersistingL2Cache() # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaCtxResetPersistingL2Cache(); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16573, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":16572 * :py:obj:`~.cudaAccessPolicyWindow` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaCtxResetPersistingL2Cache() * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":16574 * with nogil: * err = cyruntime.cudaCtxResetPersistingL2Cache() * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16574, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16574, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16574, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16574, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16574, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* 
"cuda/bindings/runtime.pyx":16556 * return (_dict_cudaError_t[err], device) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaCtxResetPersistingL2Cache(): * """ Resets all persisting lines in cache to normal status. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaCtxResetPersistingL2Cache", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16576 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamCopyAttributes(dst, src): * """ Copies attributes from source stream to destination stream. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_93cudaStreamCopyAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_92cudaStreamCopyAttributes, "cudaStreamCopyAttributes(dst, src)\n\nCopies attributes from source stream to destination stream.\n\nCopies attributes from source stream `src` to destination stream `dst`.\nBoth streams must have the same context.\n\nParameters\n----------\ndst : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Destination stream\nsrc : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Source stream For attributes see :py:obj:`~.cudaStreamAttrID`\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorNotSupported`\n\nSee Also\n--------\n:py:obj:`~.cudaAccessPolicyWindow`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_93cudaStreamCopyAttributes = {"cudaStreamCopyAttributes", 
/* NOTE(review): Machine-generated by Cython from cuda/bindings/runtime.pyx:16576 — do not hand-edit; regenerate from the .pyx. Continuation of the PyMethodDef and fastcall wrapper for cudaStreamCopyAttributes(dst, src). */ (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_93cudaStreamCopyAttributes, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_92cudaStreamCopyAttributes}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_93cudaStreamCopyAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; PyObject *__pyx_v_src = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamCopyAttributes (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_src_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16576, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16576, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16576, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamCopyAttributes", 0) < (0)) __PYX_ERR(0, 16576, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamCopyAttributes", 1, 2, 2, i); __PYX_ERR(0, 16576, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16576, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16576, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_src = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamCopyAttributes", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 16576, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamCopyAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_92cudaStreamCopyAttributes(__pyx_self, __pyx_v_dst, __pyx_v_src); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation (body continues beyond this chunk): coerces src and dst (None/cudaStream_t/driver.CUstream/int-like) to cudaStream_t handles, then calls cudaStreamCopyAttributes(cydst, cysrc) under nogil and returns (_dict_cudaError_t[err],). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_92cudaStreamCopyAttributes(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { cudaStream_t __pyx_v_cysrc; PyObject *__pyx_v_psrc = NULL; cudaStream_t __pyx_v_cydst; PyObject *__pyx_v_pdst = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamCopyAttributes", 0); /* "cuda/bindings/runtime.pyx":16600 * """ * cdef cyruntime.cudaStream_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_src == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16601 * cdef cyruntime.cudaStream_t cysrc * if src is None: * psrc = 0 # <<<<<<<<<<<<<< * elif isinstance(src, (cudaStream_t,driver.CUstream)): * psrc = int(src) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_psrc = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16600 * """ * cdef cyruntime.cudaStream_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16602 * if src is None: * psrc = 0 * elif isinstance(src, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_src, 
__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_src, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16603 * psrc = 0 * elif isinstance(src, (cudaStream_t,driver.CUstream)): * psrc = int(src) # <<<<<<<<<<<<<< * else: * psrc = int(cudaStream_t(src)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_src); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16603, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_psrc = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16602 * if src is None: * psrc = 0 * elif isinstance(src, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16605 * psrc = int(src) * else: * psrc = int(cudaStream_t(src)) # <<<<<<<<<<<<<< * cysrc = psrc * cdef cyruntime.cudaStream_t cydst */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_src}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16605, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16605, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_psrc = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16606 * else: * psrc = 
int(cudaStream_t(src)) * cysrc = psrc # <<<<<<<<<<<<<< * cdef cyruntime.cudaStream_t cydst * if dst is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_psrc); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16606, __pyx_L1_error) __pyx_v_cysrc = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16608 * cysrc = psrc * cdef cyruntime.cudaStream_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_dst == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16609 * cdef cyruntime.cudaStream_t cydst * if dst is None: * pdst = 0 # <<<<<<<<<<<<<< * elif isinstance(dst, (cudaStream_t,driver.CUstream)): * pdst = int(dst) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pdst = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16608 * cysrc = psrc * cdef cyruntime.cudaStream_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaStream_t,driver.CUstream)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":16610 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_dst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_dst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16611 * pdst = 0 * elif isinstance(dst, (cudaStream_t,driver.CUstream)): * pdst = int(dst) # <<<<<<<<<<<<<< * else: * pdst = int(cudaStream_t(dst)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_dst); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pdst = 
((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":16610 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":16613 * pdst = int(dst) * else: * pdst = int(cudaStream_t(dst)) # <<<<<<<<<<<<<< * cydst = pdst * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16613, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pdst = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":16614 * else: * pdst = int(cudaStream_t(dst)) * cydst = pdst # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamCopyAttributes(cydst, cysrc) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pdst); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16614, __pyx_L1_error) __pyx_v_cydst = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16615 * pdst = int(cudaStream_t(dst)) * cydst = pdst * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamCopyAttributes(cydst, cysrc) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); 
/*try:*/ { /* "cuda/bindings/runtime.pyx":16616 * cydst = pdst * with nogil: * err = cyruntime.cudaStreamCopyAttributes(cydst, cysrc) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamCopyAttributes(__pyx_v_cydst, __pyx_v_cysrc); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16616, __pyx_L10_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16615 * pdst = int(cudaStream_t(dst)) * cydst = pdst * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamCopyAttributes(cydst, cysrc) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L11; } __pyx_L10_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L11:; } } /* "cuda/bindings/runtime.pyx":16617 * with nogil: * err = cyruntime.cudaStreamCopyAttributes(cydst, cysrc) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 16617, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16576 * return 
(_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamCopyAttributes(dst, src): * """ Copies attributes from source stream to destination stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamCopyAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_psrc); __Pyx_XDECREF(__pyx_v_pdst); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16619 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetAttribute(hStream, attr not None : cudaStreamAttrID): * """ Queries stream attribute. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_95cudaStreamGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_94cudaStreamGetAttribute, "cudaStreamGetAttribute(hStream, attr: cudaStreamAttrID)\n\nQueries stream attribute.\n\nQueries attribute `attr` from `hStream` and stores it in corresponding\nmember of `value_out`.\n\nParameters\n----------\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n\nattr : :py:obj:`~.cudaStreamAttrID`\n\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\nvalue_out : :py:obj:`~.cudaStreamAttrValue`\n\n\nSee Also\n--------\n:py:obj:`~.cudaAccessPolicyWindow`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_95cudaStreamGetAttribute = {"cudaStreamGetAttribute", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_95cudaStreamGetAttribute, 
__Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_94cudaStreamGetAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_95cudaStreamGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_hStream = 0; PyObject *__pyx_v_attr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamGetAttribute (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hStream,&__pyx_mstate_global->__pyx_n_u_attr,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16619, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16619, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16619, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamGetAttribute", 0) < (0)) __PYX_ERR(0, 16619, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamGetAttribute", 1, 2, 2, i); __PYX_ERR(0, 16619, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16619, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16619, __pyx_L3_error) } __pyx_v_hStream = values[0]; __pyx_v_attr = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamGetAttribute", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 16619, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_attr) == 
Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attr"); __PYX_ERR(0, 16620, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_94cudaStreamGetAttribute(__pyx_self, __pyx_v_hStream, __pyx_v_attr); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_94cudaStreamGetAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hStream, PyObject *__pyx_v_attr) { cudaStream_t __pyx_v_cyhStream; PyObject *__pyx_v_phStream = NULL; __pyx_t_4cuda_8bindings_9cyruntime_cudaStreamAttrID __pyx_v_cyattr; struct __pyx_obj_4cuda_8bindings_7runtime_cudaStreamAttrValue *__pyx_v_value_out = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; __pyx_t_4cuda_8bindings_9cyruntime_cudaStreamAttrID __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamGetAttribute", 0); /* "cuda/bindings/runtime.pyx":16645 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_hStream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16646 * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: * phStream = 0 # <<<<<<<<<<<<<< * elif isinstance(hStream, 
(cudaStream_t,driver.CUstream)): * phStream = int(hStream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phStream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16645 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16647 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16648 * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) # <<<<<<<<<<<<<< * else: * phStream = int(cudaStream_t(hStream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hStream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16648, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phStream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16647 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16650 * phStream = int(hStream) * else: * phStream = int(cudaStream_t(hStream)) # <<<<<<<<<<<<<< * cyhStream = phStream * cdef cyruntime.cudaStreamAttrID cyattr = attr.value */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 
1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hStream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16650, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16650, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phStream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16651 * else: * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream # <<<<<<<<<<<<<< * cdef cyruntime.cudaStreamAttrID cyattr = attr.value * cdef cudaStreamAttrValue value_out = cudaStreamAttrValue() */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phStream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16651, __pyx_L1_error) __pyx_v_cyhStream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16652 * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream * cdef cyruntime.cudaStreamAttrID cyattr = attr.value # <<<<<<<<<<<<<< * cdef cudaStreamAttrValue value_out = cudaStreamAttrValue() * with nogil: */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_attr, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16652, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaLaunchAttributeID)__Pyx_PyLong_As_enum__cudaLaunchAttributeID(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16652, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyattr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":16653 * cyhStream = phStream * cdef cyruntime.cudaStreamAttrID cyattr = attr.value * cdef cudaStreamAttrValue value_out = 
cudaStreamAttrValue() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamGetAttribute(cyhStream, cyattr, value_out._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStreamAttrValue); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStreamAttrValue); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16653, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_value_out = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaStreamAttrValue *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":16654 * cdef cyruntime.cudaStreamAttrID cyattr = attr.value * cdef cudaStreamAttrValue value_out = cudaStreamAttrValue() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetAttribute(cyhStream, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16655 * cdef cudaStreamAttrValue value_out = cudaStreamAttrValue() * with nogil: * err = cyruntime.cudaStreamGetAttribute(cyhStream, cyattr, value_out._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamGetAttribute(__pyx_v_cyhStream, __pyx_v_cyattr, ((__pyx_t_4cuda_8bindings_9cyruntime_cudaStreamAttrValue *)__pyx_v_value_out->__pyx_base._pvt_ptr)); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16655, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":16654 * cdef cyruntime.cudaStreamAttrID cyattr 
= attr.value * cdef cudaStreamAttrValue value_out = cudaStreamAttrValue() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetAttribute(cyhStream, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":16656 * with nogil: * err = cyruntime.cudaStreamGetAttribute(cyhStream, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], value_out) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16657 * err = cyruntime.cudaStreamGetAttribute(cyhStream, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], value_out) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 16657, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 16657, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 
0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16656 * with nogil: * err = cyruntime.cudaStreamGetAttribute(cyhStream, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], value_out) */ } /* "cuda/bindings/runtime.pyx":16658 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], value_out) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 16658, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_value_out); __Pyx_GIVEREF((PyObject *)__pyx_v_value_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_value_out)) != (0)) __PYX_ERR(0, 16658, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16619 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetAttribute(hStream, attr not None : cudaStreamAttrID): * """ Queries stream attribute. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phStream); __Pyx_XDECREF((PyObject *)__pyx_v_value_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16660 * return (_dict_cudaError_t[err], value_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamSetAttribute(hStream, attr not None : cudaStreamAttrID, value : Optional[cudaStreamAttrValue]): * """ Sets stream attribute. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_97cudaStreamSetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_96cudaStreamSetAttribute, "cudaStreamSetAttribute(hStream, attr: cudaStreamAttrID, cudaStreamAttrValue value: Optional[cudaStreamAttrValue])\n\nSets stream attribute.\n\nSets attribute `attr` on `hStream` from corresponding attribute of\n`value`. The updated attribute will be applied to subsequent work\nsubmitted to the stream. 
It will not affect previously submitted work.\n\nParameters\n----------\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n\nattr : :py:obj:`~.cudaStreamAttrID`\n\nvalue : :py:obj:`~.cudaStreamAttrValue`\n\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaAccessPolicyWindow`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_97cudaStreamSetAttribute = {"cudaStreamSetAttribute", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_97cudaStreamSetAttribute, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_96cudaStreamSetAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_97cudaStreamSetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_hStream = 0; PyObject *__pyx_v_attr = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaStreamAttrValue *__pyx_v_value = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamSetAttribute (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hStream,&__pyx_mstate_global->__pyx_n_u_attr,&__pyx_mstate_global->__pyx_n_u_value,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16660, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16660, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16660, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16660, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamSetAttribute", 0) < (0)) __PYX_ERR(0, 16660, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamSetAttribute", 1, 3, 3, i); __PYX_ERR(0, 16660, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16660, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16660, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16660, __pyx_L3_error) } __pyx_v_hStream = values[0]; __pyx_v_attr = values[1]; __pyx_v_value = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaStreamAttrValue *)values[2]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamSetAttribute", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 16660, __pyx_L3_error) __pyx_L6_skip:; goto 
__pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamSetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_attr) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attr"); __PYX_ERR(0, 16661, __pyx_L1_error) } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_value), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStreamAttrValue, 1, "value", 0))) __PYX_ERR(0, 16661, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_96cudaStreamSetAttribute(__pyx_self, __pyx_v_hStream, __pyx_v_attr, __pyx_v_value); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_96cudaStreamSetAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hStream, PyObject *__pyx_v_attr, struct __pyx_obj_4cuda_8bindings_7runtime_cudaStreamAttrValue *__pyx_v_value) { cudaStream_t __pyx_v_cyhStream; PyObject *__pyx_v_phStream = NULL; __pyx_t_4cuda_8bindings_9cyruntime_cudaStreamAttrID __pyx_v_cyattr; __pyx_t_4cuda_8bindings_9cyruntime_cudaStreamAttrValue *__pyx_v_cyvalue_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t 
__pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; __pyx_t_4cuda_8bindings_9cyruntime_cudaStreamAttrID __pyx_t_8; union cudaLaunchAttributeValue *__pyx_t_9; cudaError_t __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamSetAttribute", 0); /* "cuda/bindings/runtime.pyx":16687 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_hStream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16688 * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: * phStream = 0 # <<<<<<<<<<<<<< * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phStream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16687 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16689 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16690 * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) # <<<<<<<<<<<<<< * else: * phStream = int(cudaStream_t(hStream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hStream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16690, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phStream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16689 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16692 * phStream = int(hStream) * else: * phStream = int(cudaStream_t(hStream)) # <<<<<<<<<<<<<< * cyhStream = phStream * cdef cyruntime.cudaStreamAttrID cyattr = attr.value */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hStream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16692, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16692, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phStream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16693 * else: * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream # <<<<<<<<<<<<<< * cdef cyruntime.cudaStreamAttrID cyattr = attr.value * cdef cyruntime.cudaStreamAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phStream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16693, __pyx_L1_error) __pyx_v_cyhStream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16694 * 
phStream = int(cudaStream_t(hStream)) * cyhStream = phStream * cdef cyruntime.cudaStreamAttrID cyattr = attr.value # <<<<<<<<<<<<<< * cdef cyruntime.cudaStreamAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL * with nogil: */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_attr, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaLaunchAttributeID)__Pyx_PyLong_As_enum__cudaLaunchAttributeID(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16694, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyattr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":16695 * cyhStream = phStream * cdef cyruntime.cudaStreamAttrID cyattr = attr.value * cdef cyruntime.cudaStreamAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamSetAttribute(cyhStream, cyattr, cyvalue_ptr) */ __pyx_t_1 = (((PyObject *)__pyx_v_value) != Py_None); if (__pyx_t_1) { __pyx_t_9 = __pyx_v_value->__pyx_base._pvt_ptr; } else { __pyx_t_9 = NULL; } __pyx_v_cyvalue_ptr = __pyx_t_9; /* "cuda/bindings/runtime.pyx":16696 * cdef cyruntime.cudaStreamAttrID cyattr = attr.value * cdef cyruntime.cudaStreamAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamSetAttribute(cyhStream, cyattr, cyvalue_ptr) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16697 * cdef cyruntime.cudaStreamAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL * with nogil: * err = cyruntime.cudaStreamSetAttribute(cyhStream, cyattr, cyvalue_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_10 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamSetAttribute(__pyx_v_cyhStream, __pyx_v_cyattr, __pyx_v_cyvalue_ptr); if 
(unlikely(__pyx_t_10 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16697, __pyx_L7_error) __pyx_v_err = __pyx_t_10; } /* "cuda/bindings/runtime.pyx":16696 * cdef cyruntime.cudaStreamAttrID cyattr = attr.value * cdef cyruntime.cudaStreamAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamSetAttribute(cyhStream, cyattr, cyvalue_ptr) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":16698 * with nogil: * err = cyruntime.cudaStreamSetAttribute(cyhStream, cyattr, cyvalue_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16698, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16660 * return (_dict_cudaError_t[err], value_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamSetAttribute(hStream, attr not None : cudaStreamAttrID, value : 
Optional[cudaStreamAttrValue]): * """ Sets stream attribute. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamSetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phStream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16700 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamDestroy(stream): * """ Destroys and cleans up an asynchronous stream. */
/* NOTE(review): Cython-generated C (source: "cuda/bindings/runtime.pyx", quoted in the interleaved comments). Do not hand-edit; change the .pyx and regenerate. */
/* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_99cudaStreamDestroy(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_98cudaStreamDestroy, "cudaStreamDestroy(stream)\n\nDestroys and cleans up an asynchronous stream.\n\nDestroys and cleans up the asynchronous stream specified by `stream`.\n\nIn case the device is still doing work in the stream `stream` when\n:py:obj:`~.cudaStreamDestroy()` is called, the function will return\nimmediately and the resources associated with `stream` will be released\nautomatically once the device has completed all work in `stream`.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cuStreamDestroy`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_99cudaStreamDestroy = 
{"cudaStreamDestroy", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_99cudaStreamDestroy, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_98cudaStreamDestroy};
/* Wrapper entry point: unpacks the single "stream" argument (positional or keyword), raising TypeError via __Pyx_RaiseArgtupleInvalid on arity mismatch, then delegates to __pyx_pf_..._98cudaStreamDestroy below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_99cudaStreamDestroy(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamDestroy (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16700, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16700, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamDestroy", 0) < (0)) __PYX_ERR(0, 16700, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamDestroy", 1, 1, 1, i); __PYX_ERR(0, 16700, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16700, __pyx_L3_error) } __pyx_v_stream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamDestroy", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16700, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamDestroy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_98cudaStreamDestroy(__pyx_self, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of cudaStreamDestroy(stream): normalizes `stream` to a Python int handle (None -> 0; cudaStream_t/driver.CUstream -> int(stream); anything else is first wrapped as cudaStream_t then int()-ed), converts it to a cyruntime.cudaStream_t via an unsigned 64-bit extraction, calls cyruntime.cudaStreamDestroy with the GIL released, and returns the 1-tuple (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_98cudaStreamDestroy(CYTHON_UNUSED PyObject 
*__pyx_self, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamDestroy", 0); /* "cuda/bindings/runtime.pyx":16726 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16727 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16726 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16728 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16729 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = 
int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16728 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16731 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16731, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16731, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16732 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamDestroy(cystream) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16732, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* 
"cuda/bindings/runtime.pyx":16733 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamDestroy(cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16734 * cystream = pstream * with nogil: * err = cyruntime.cudaStreamDestroy(cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamDestroy(__pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16734, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16733 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamDestroy(cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":16735 * with nogil: * err = cyruntime.cudaStreamDestroy(cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16735, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16735, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16735, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16735, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16735, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16700 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamDestroy(stream): * """ Destroys and cleans up an asynchronous stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamDestroy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16737 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamWaitEvent(stream, event, unsigned int flags): * """ Make a compute stream wait on an event. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_101cudaStreamWaitEvent(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_100cudaStreamWaitEvent, "cudaStreamWaitEvent(stream, event, unsigned int flags)\n\nMake a compute stream wait on an event.\n\nMakes all future work submitted to `stream` wait for all work captured\nin `event`. See :py:obj:`~.cudaEventRecord()` for details on what is\ncaptured by an event. The synchronization will be performed efficiently\non the device when applicable. 
`event` may be from a different device\nthan `stream`.\n\nflags include:\n\n- :py:obj:`~.cudaEventWaitDefault`: Default event creation flag.\n\n- :py:obj:`~.cudaEventWaitExternal`: Event is captured in the graph as\n an external event node when performing stream capture.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream to wait\nevent : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Event to wait on\nflags : unsigned int\n Parameters for the operation(See above)\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamWaitEvent`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_101cudaStreamWaitEvent = {"cudaStreamWaitEvent", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_101cudaStreamWaitEvent, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_100cudaStreamWaitEvent}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_101cudaStreamWaitEvent(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; PyObject *__pyx_v_event = 0; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamWaitEvent (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = 
PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,&__pyx_mstate_global->__pyx_n_u_event_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16737, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16737, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16737, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16737, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamWaitEvent", 0) < (0)) __PYX_ERR(0, 16737, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamWaitEvent", 1, 3, 3, i); __PYX_ERR(0, 16737, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16737, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16737, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && 
unlikely(!values[2])) __PYX_ERR(0, 16737, __pyx_L3_error) } __pyx_v_stream = values[0]; __pyx_v_event = values[1]; __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16738, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamWaitEvent", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 16737, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamWaitEvent", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_100cudaStreamWaitEvent(__pyx_self, __pyx_v_stream, __pyx_v_event, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_100cudaStreamWaitEvent(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_event, unsigned int __pyx_v_flags) { cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamWaitEvent", 0); /* "cuda/bindings/runtime.pyx":16773 * """ * cdef cyruntime.cudaEvent_t cyevent * 
if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_event == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16774 * cdef cyruntime.cudaEvent_t cyevent * if event is None: * pevent = 0 # <<<<<<<<<<<<<< * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pevent = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16773 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16775 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16776 * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) # <<<<<<<<<<<<<< * else: * pevent = int(cudaEvent_t(event)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_event); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16776, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pevent = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16775 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16778 * pevent = int(event) * else: * pevent = int(cudaEvent_t(event)) # <<<<<<<<<<<<<< * cyevent = pevent * cdef cyruntime.cudaStream_t cystream */ /*else*/ { __pyx_t_4 
= NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_event}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16778, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16778, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pevent = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16779 * else: * pevent = int(cudaEvent_t(event)) * cyevent = pevent # <<<<<<<<<<<<<< * cdef cyruntime.cudaStream_t cystream * if stream is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16779, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16781 * cyevent = pevent * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16782 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16781 * cyevent = pevent * cdef cyruntime.cudaStream_t cystream * if stream 
is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":16783 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16784 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":16783 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":16786 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_stream}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 
16786, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16786, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":16787 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamWaitEvent(cystream, cyevent, flags) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16787, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16788 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamWaitEvent(cystream, cyevent, flags) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16789 * cystream = pstream * with nogil: * err = cyruntime.cudaStreamWaitEvent(cystream, cyevent, flags) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamWaitEvent(__pyx_v_cystream, __pyx_v_cyevent, __pyx_v_flags); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16789, __pyx_L10_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16788 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamWaitEvent(cystream, cyevent, flags) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L11; } __pyx_L10_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto 
__pyx_L1_error; } __pyx_L11:; } } /* "cuda/bindings/runtime.pyx":16790 * with nogil: * err = cyruntime.cudaStreamWaitEvent(cystream, cyevent, flags) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * ctypedef struct cudaStreamCallbackData_st: */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16790, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16790, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16790, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16790, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 16790, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16737 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamWaitEvent(stream, event, unsigned int flags): * """ Make a compute stream wait on an event. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamWaitEvent", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pevent); __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* NOTE(review): Cython-generated C for cuda/bindings/runtime.pyx -- do not hand-edit; change the .pyx and regenerate. */ /* "cuda/bindings/runtime.pyx":16798 * ctypedef cudaStreamCallbackData_st cudaStreamCallbackData * * @cython.show_performance_hints(False) # <<<<<<<<<<<<<< * cdef void cudaStreamRtCallbackWrapper(cyruntime.cudaStream_t stream, cyruntime.cudaError_t status, void *data) nogil: * cdef cudaStreamCallbackData *cbData = data */ /* C trampoline registered with cudaStreamAddCallback below: entered without the GIL, re-acquires it via PyGILState_Ensure, invokes the user callback stored in cbData, then frees the malloc'ed cbData (one-shot invocation). */ static void __pyx_f_4cuda_8bindings_7runtime_cudaStreamRtCallbackWrapper(cudaStream_t __pyx_v_stream, cudaError_t __pyx_v_status, void *__pyx_v_data) { __pyx_t_4cuda_8bindings_7runtime_cudaStreamCallbackData *__pyx_v_cbData; /* "cuda/bindings/runtime.pyx":16800 * @cython.show_performance_hints(False) * cdef void cudaStreamRtCallbackWrapper(cyruntime.cudaStream_t stream, cyruntime.cudaError_t status, void *data) nogil: * cdef cudaStreamCallbackData *cbData = data # <<<<<<<<<<<<<< * with gil: * cbData.callback(stream, status, cbData.userData) */ __pyx_v_cbData = ((__pyx_t_4cuda_8bindings_7runtime_cudaStreamCallbackData *)__pyx_v_data); /* "cuda/bindings/runtime.pyx":16801 * cdef void cudaStreamRtCallbackWrapper(cyruntime.cudaStream_t stream, cyruntime.cudaError_t status, void *data) nogil: * cdef cudaStreamCallbackData *cbData = data * with gil: # <<<<<<<<<<<<<< * cbData.callback(stream, status, cbData.userData) * free(cbData) */ { PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16802 * cdef cudaStreamCallbackData *cbData = data * with gil: * cbData.callback(stream, status, cbData.userData) # <<<<<<<<<<<<<< * free(cbData) * */ __pyx_v_cbData->callback(__pyx_v_stream, __pyx_v_status, 
__pyx_v_cbData->userData); } /* "cuda/bindings/runtime.pyx":16801 * cdef void cudaStreamRtCallbackWrapper(cyruntime.cudaStream_t stream, cyruntime.cudaError_t status, void *data) nogil: * cdef cudaStreamCallbackData *cbData = data * with gil: # <<<<<<<<<<<<<< * cbData.callback(stream, status, cbData.userData) * free(cbData) */ /*finally:*/ { /*normal exit:*/{ __Pyx_PyGILState_Release(__pyx_gilstate_save); goto __pyx_L5; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":16803 * with gil: * cbData.callback(stream, status, cbData.userData) * free(cbData) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ /* cbData was malloc'ed by cudaStreamAddCallback; released here after the single invocation (outside the GIL block, matching the .pyx source above) */ free(__pyx_v_cbData); /* "cuda/bindings/runtime.pyx":16798 * ctypedef cudaStreamCallbackData_st cudaStreamCallbackData * * @cython.show_performance_hints(False) # <<<<<<<<<<<<<< * cdef void cudaStreamRtCallbackWrapper(cyruntime.cudaStream_t stream, cyruntime.cudaError_t status, void *data) nogil: * cdef cudaStreamCallbackData *cbData = data */ /* function exit code */ } /* "cuda/bindings/runtime.pyx":16805 * free(cbData) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamAddCallback(stream, callback, userData, unsigned int flags): * """ Add a callback to a compute stream. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_103cudaStreamAddCallback(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_102cudaStreamAddCallback, "cudaStreamAddCallback(stream, callback, userData, unsigned int flags)\n\nAdd a callback to a compute stream.\n\nAdds a callback to be called on the host after all currently enqueued\nitems in the stream have completed. For each cudaStreamAddCallback\ncall, a callback will be executed exactly once. 
The callback will block\nlater work in the stream until it is finished.\n\nThe callback may be passed :py:obj:`~.cudaSuccess` or an error code. In\nthe event of a device error, all subsequently executed callbacks will\nreceive an appropriate :py:obj:`~.cudaError_t`.\n\nCallbacks must not make any CUDA API calls. Attempting to use CUDA APIs\nmay result in :py:obj:`~.cudaErrorNotPermitted`. Callbacks must not\nperform any synchronization that may depend on outstanding device work\nor other callbacks that are not mandated to run earlier. Callbacks\nwithout a mandated order (in independent streams) execute in undefined\norder and may be serialized.\n\nFor the purposes of Unified Memory, callback execution makes a number\nof guarantees:\n\n- The callback stream is considered idle for the duration of the\n callback. Thus, for example, a callback may always use memory\n attached to the callback stream.\n\n- The start of execution of a callback has the same effect as\n synchronizing an event recorded in the same stream immediately prior\n to the callback. It thus synchronizes streams which have been\n \"joined\" prior to the callback.\n\n- Adding device work to any stream does not have the effect of making\n the stream active until all preceding callbacks have executed. Thus,\n for example, a callback might use global attached memory even if work\n has been added to another stream, if it has been properly ordered\n with an event.\n\n- Completion of a callback does not cause a stream to become active\n except as described above. The callback stream will remain idle if no\n device work follows the callback, and will remain idle across\n consecutive callbacks without devi""ce work in between. 
Thus, for\n example, stream synchronization can be done by signaling from a\n callback at the end of the stream.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream to add callback to\ncallback : :py:obj:`~.cudaStreamCallback_t`\n The function to call once preceding stream operations are complete\nuserData : Any\n User specified data to be passed to the callback function\nflags : unsigned int\n Reserved for future use, must be 0\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorNotSupported`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cudaMallocManaged`, :py:obj:`~.cudaStreamAttachMemAsync`, :py:obj:`~.cudaLaunchHostFunc`, :py:obj:`~.cuStreamAddCallback`\n\nNotes\n-----\nThis function is slated for eventual deprecation and removal. If you do not require the callback to execute in case of a device error, consider using :py:obj:`~.cudaLaunchHostFunc`. 
Additionally, this function is not supported with :py:obj:`~.cudaStreamBeginCapture` and :py:obj:`~.cudaStreamEndCapture`, unlike :py:obj:`~.cudaLaunchHostFunc`."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_103cudaStreamAddCallback = {"cudaStreamAddCallback", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_103cudaStreamAddCallback, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_102cudaStreamAddCallback}; /* Wrapper: unpacks (stream, callback, userData, flags) from METH_FASTCALL positional/keyword arguments, converts flags to unsigned int, and forwards to the implementation function. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_103cudaStreamAddCallback(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; PyObject *__pyx_v_callback = 0; PyObject *__pyx_v_userData = 0; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamAddCallback (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,&__pyx_mstate_global->__pyx_n_u_callback,&__pyx_mstate_global->__pyx_n_u_userData_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; /* NOTE(review): "unlikely(__pyx_kwds_len) < 0" hints the wrong sub-expression -- unlikely() is typically __builtin_expect(!!(x),0), so the comparison is against 0/1 and presumably can never fire; appears benign if __Pyx_NumKwargs_FASTCALL cannot fail, but the fix belongs upstream in the Cython code generator, not in this generated file. */ if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16805, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 16805, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16805, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16805, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16805, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamAddCallback", 0) < (0)) __PYX_ERR(0, 16805, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamAddCallback", 1, 4, 4, i); __PYX_ERR(0, 16805, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16805, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16805, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16805, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 16805, __pyx_L3_error) } __pyx_v_stream = values[0]; __pyx_v_callback 
= values[1]; __pyx_v_userData = values[2]; __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[3]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16806, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamAddCallback", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 16805, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamAddCallback", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_102cudaStreamAddCallback(__pyx_self, __pyx_v_stream, __pyx_v_callback, __pyx_v_userData, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: coerces callback/stream Python objects to C handles via int(), wraps userData in _HelperInputVoidPtr, heap-allocates a cudaStreamCallbackData trampoline context, and enqueues cudaStreamRtCallbackWrapper with cudaStreamAddCallback; returns a 1-tuple (cudaError_t,). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_102cudaStreamAddCallback(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_callback, PyObject *__pyx_v_userData, unsigned int __pyx_v_flags) { cudaStreamCallback_t __pyx_v_cycallback; PyObject *__pyx_v_pcallback = NULL; cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyuserData = NULL; void *__pyx_v_cyuserData_ptr; __pyx_t_4cuda_8bindings_7runtime_cudaStreamCallbackData *__pyx_v_cbData; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; int __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 
0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamAddCallback", 0); /* "cuda/bindings/runtime.pyx":16875 * """ * cdef cyruntime.cudaStreamCallback_t cycallback * if callback is None: # <<<<<<<<<<<<<< * pcallback = 0 * elif isinstance(callback, (cudaStreamCallback_t,)): */ __pyx_t_1 = (__pyx_v_callback == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16876 * cdef cyruntime.cudaStreamCallback_t cycallback * if callback is None: * pcallback = 0 # <<<<<<<<<<<<<< * elif isinstance(callback, (cudaStreamCallback_t,)): * pcallback = int(callback) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pcallback = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16875 * """ * cdef cyruntime.cudaStreamCallback_t cycallback * if callback is None: # <<<<<<<<<<<<<< * pcallback = 0 * elif isinstance(callback, (cudaStreamCallback_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16877 * if callback is None: * pcallback = 0 * elif isinstance(callback, (cudaStreamCallback_t,)): # <<<<<<<<<<<<<< * pcallback = int(callback) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_callback, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStreamCallback_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16878 * pcallback = 0 * elif isinstance(callback, (cudaStreamCallback_t,)): * pcallback = int(callback) # <<<<<<<<<<<<<< * else: * pcallback = int(cudaStreamCallback_t(callback)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_callback); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16878, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pcallback = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":16877 * if callback is None: * pcallback = 0 * elif isinstance(callback, (cudaStreamCallback_t,)): # <<<<<<<<<<<<<< * pcallback = int(callback) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16880 * pcallback = int(callback) * else: * pcallback = int(cudaStreamCallback_t(callback)) # 
<<<<<<<<<<<<<< * cycallback = pcallback * cdef cyruntime.cudaStream_t cystream */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStreamCallback_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStreamCallback_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_callback}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16880, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pcallback = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16881 * else: * pcallback = int(cudaStreamCallback_t(callback)) * cycallback = pcallback # <<<<<<<<<<<<<< * cdef cyruntime.cudaStream_t cystream * if stream is None: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pcallback); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16881, __pyx_L1_error) __pyx_v_cycallback = ((cudaStreamCallback_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":16883 * cycallback = pcallback * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16884 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ 
__Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16883 * cycallback = pcallback * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":16885 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_7 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_7) { } else { __pyx_t_1 = __pyx_t_7; goto __pyx_L5_bool_binop_done; } __pyx_t_7 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_7; __pyx_L5_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16886 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_4 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16886, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_pstream = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":16885 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":16888 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cyuserData = _HelperInputVoidPtr(userData) */ /*else*/ { __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_stream}; __pyx_t_4 = 
__Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16888, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_t_3 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16888, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF((PyObject *)__pyx_t_4); __pyx_t_4 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":16889 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cyuserData = _HelperInputVoidPtr(userData) * cdef void* cyuserData_ptr = cyuserData.cptr */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16889, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":16890 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cyuserData = _HelperInputVoidPtr(userData) # <<<<<<<<<<<<<< * cdef void* cyuserData_ptr = cyuserData.cptr * */ __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_userData}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16890, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_v_cyuserData = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_3); 
__pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16891 * cystream = pstream * cyuserData = _HelperInputVoidPtr(userData) * cdef void* cyuserData_ptr = cyuserData.cptr # <<<<<<<<<<<<<< * * cdef cudaStreamCallbackData *cbData = NULL */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyuserData), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_3); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_cyuserData_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":16893 * cdef void* cyuserData_ptr = cyuserData.cptr * * cdef cudaStreamCallbackData *cbData = NULL # <<<<<<<<<<<<<< * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: */ __pyx_v_cbData = NULL; /* "cuda/bindings/runtime.pyx":16894 * * cdef cudaStreamCallbackData *cbData = NULL * cbData = malloc(sizeof(cbData[0])) # <<<<<<<<<<<<<< * if cbData == NULL: * return (cudaError_t.cudaErrorMemoryAllocation,) */ /* ownership: on successful enqueue the trampoline (cudaStreamRtCallbackWrapper) frees cbData after invoking the callback; on enqueue failure it is freed below */ __pyx_v_cbData = ((__pyx_t_4cuda_8bindings_7runtime_cudaStreamCallbackData *)malloc((sizeof((__pyx_v_cbData[0]))))); /* "cuda/bindings/runtime.pyx":16895 * cdef cudaStreamCallbackData *cbData = NULL * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: # <<<<<<<<<<<<<< * return (cudaError_t.cudaErrorMemoryAllocation,) * cbData.callback = cycallback */ __pyx_t_1 = (__pyx_v_cbData == NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16896 * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: * return (cudaError_t.cudaErrorMemoryAllocation,) # <<<<<<<<<<<<<< * cbData.callback = cycallback * cbData.userData = cyuserData_ptr */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16896, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_cudaErrorMemoryAllocation); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 16896, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16895 * cdef cudaStreamCallbackData *cbData = NULL * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: # <<<<<<<<<<<<<< * return (cudaError_t.cudaErrorMemoryAllocation,) * cbData.callback = cycallback */ } /* "cuda/bindings/runtime.pyx":16897 * if cbData == NULL: * return (cudaError_t.cudaErrorMemoryAllocation,) * cbData.callback = cycallback # <<<<<<<<<<<<<< * cbData.userData = cyuserData_ptr * */ __pyx_v_cbData->callback = __pyx_v_cycallback; /* "cuda/bindings/runtime.pyx":16898 * return (cudaError_t.cudaErrorMemoryAllocation,) * cbData.callback = cycallback * cbData.userData = cyuserData_ptr # <<<<<<<<<<<<<< * * with nogil: */ __pyx_v_cbData->userData = __pyx_v_cyuserData_ptr; /* "cuda/bindings/runtime.pyx":16900 * cbData.userData = cyuserData_ptr * * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamAddCallback(cystream, cudaStreamRtCallbackWrapper, cbData, flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16901 * * with nogil: * err = cyruntime.cudaStreamAddCallback(cystream, cudaStreamRtCallbackWrapper, cbData, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * free(cbData) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamAddCallback(__pyx_v_cystream, 
((cudaStreamCallback_t)__pyx_f_4cuda_8bindings_7runtime_cudaStreamRtCallbackWrapper), ((void *)__pyx_v_cbData), __pyx_v_flags); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16901, __pyx_L9_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16900 * cbData.userData = cyuserData_ptr * * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamAddCallback(cystream, cudaStreamRtCallbackWrapper, cbData, flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L10; } __pyx_L9_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L10:; } } /* "cuda/bindings/runtime.pyx":16902 * with nogil: * err = cyruntime.cudaStreamAddCallback(cystream, cudaStreamRtCallbackWrapper, cbData, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * free(cbData) * return (_dict_cudaError_t[err],) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16903 * err = cyruntime.cudaStreamAddCallback(cystream, cudaStreamRtCallbackWrapper, cbData, flags) * if err != cyruntime.cudaSuccess: * free(cbData) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ free(__pyx_v_cbData); /* "cuda/bindings/runtime.pyx":16902 * with nogil: * err = cyruntime.cudaStreamAddCallback(cystream, cudaStreamRtCallbackWrapper, cbData, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * free(cbData) * return (_dict_cudaError_t[err],) */ } /* "cuda/bindings/runtime.pyx":16904 * if err != cyruntime.cudaSuccess: * free(cbData) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16904, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 16904, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16904, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16904, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16904, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16805 * free(cbData) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamAddCallback(stream, callback, userData, unsigned int flags): * """ Add a callback to a compute stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamAddCallback", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pcallback); __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_cyuserData); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16906 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamSynchronize(stream): * """ Waits for stream tasks to complete. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_105cudaStreamSynchronize(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_104cudaStreamSynchronize, "cudaStreamSynchronize(stream)\n\nWaits for stream tasks to complete.\n\nBlocks until `stream` has completed all operations. 
If the\n:py:obj:`~.cudaDeviceScheduleBlockingSync` flag was set for this\ndevice, the host thread will block until the stream is finished with\nall of its tasks.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamSynchronize`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_105cudaStreamSynchronize = {"cudaStreamSynchronize", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_105cudaStreamSynchronize, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_104cudaStreamSynchronize}; /* Wrapper: unpacks the single "stream" argument from METH_FASTCALL args/kwargs and forwards to the implementation function. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_105cudaStreamSynchronize(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamSynchronize (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; /* NOTE(review): same misplaced unlikely() parenthesization as in the cudaStreamAddCallback wrapper above; benign here, fix belongs in the Cython generator. */ if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16906, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16906, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamSynchronize", 0) < (0)) __PYX_ERR(0, 16906, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamSynchronize", 1, 1, 1, i); __PYX_ERR(0, 16906, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16906, __pyx_L3_error) } __pyx_v_stream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamSynchronize", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16906, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamSynchronize", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_104cudaStreamSynchronize(__pyx_self, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_104cudaStreamSynchronize(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamSynchronize", 0); /* "cuda/bindings/runtime.pyx":16930 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16931 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16930 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16932 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16933 * 
pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16933, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16932 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16935 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16935, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16935, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16936 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamSynchronize(cystream) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16936, __pyx_L1_error) __pyx_v_cystream = 
((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16937 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamSynchronize(cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16938 * cystream = pstream * with nogil: * err = cyruntime.cudaStreamSynchronize(cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamSynchronize(__pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16938, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16937 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamSynchronize(cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":16939 * with nogil: * err = cyruntime.cudaStreamSynchronize(cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16939, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16939, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16939, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = 
PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16939, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16939, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16906 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamSynchronize(stream): * """ Waits for stream tasks to complete. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamSynchronize", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16941 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamQuery(stream): * """ Queries an asynchronous stream for completion status. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_107cudaStreamQuery(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_106cudaStreamQuery, "cudaStreamQuery(stream)\n\nQueries an asynchronous stream for completion status.\n\nReturns :py:obj:`~.cudaSuccess` if all operations in `stream` have\ncompleted, or :py:obj:`~.cudaErrorNotReady` if not.\n\nFor the purposes of Unified Memory, a return value of\n:py:obj:`~.cudaSuccess` is equivalent to having called\n:py:obj:`~.cudaStreamSynchronize()`.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorNotReady`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cuStreamQuery`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_107cudaStreamQuery = {"cudaStreamQuery", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_107cudaStreamQuery, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_106cudaStreamQuery}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_107cudaStreamQuery(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamQuery (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16941, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16941, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamQuery", 0) < (0)) __PYX_ERR(0, 16941, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamQuery", 1, 1, 1, i); __PYX_ERR(0, 16941, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16941, __pyx_L3_error) } __pyx_v_stream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamQuery", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 16941, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamQuery", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_106cudaStreamQuery(__pyx_self, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_106cudaStreamQuery(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamQuery", 0); /* "cuda/bindings/runtime.pyx":16967 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16968 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":16967 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16969 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) 
* else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":16970 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16970, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":16969 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":16972 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16972, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16972, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = 
((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":16973 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamQuery(cystream) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 16973, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":16974 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamQuery(cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":16975 * cystream = pstream * with nogil: * err = cyruntime.cudaStreamQuery(cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamQuery(__pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 16975, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":16974 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamQuery(cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":16976 * with nogil: * err = cyruntime.cudaStreamQuery(cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 16976, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16976, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 16976, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16941 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamQuery(stream): * """ Queries an asynchronous stream for completion status. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamQuery", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":16978 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamAttachMemAsync(stream, devPtr, size_t length, unsigned int flags): * """ Attach memory to a stream asynchronously. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_109cudaStreamAttachMemAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_108cudaStreamAttachMemAsync, "cudaStreamAttachMemAsync(stream, devPtr, size_t length, unsigned int flags)\n\nAttach memory to a stream asynchronously.\n\nEnqueues an operation in `stream` to specify stream association of\n`length` bytes of memory starting from `devPtr`. This function is a\nstream-ordered operation, meaning that it is dependent on, and will\nonly take effect when, previous work in stream has completed. Any\nprevious association is automatically replaced.\n\n`devPtr` must point to an one of the following types of memories:\n\n- managed memory declared using the managed keyword or allocated with\n :py:obj:`~.cudaMallocManaged`.\n\n- a valid host-accessible region of system-allocated pageable memory.\n This type of memory may only be specified if the device associated\n with the stream reports a non-zero value for the device attribute\n :py:obj:`~.cudaDevAttrPageableMemoryAccess`.\n\nFor managed allocations, `length` must be either zero or the entire\nallocation's size. Both indicate that the entire allocation's stream\nassociation is being changed. Currently, it is not possible to change\nstream association for a portion of a managed allocation.\n\nFor pageable allocations, `length` must be non-zero.\n\nThe stream association is specified using `flags` which must be one of\n:py:obj:`~.cudaMemAttachGlobal`, :py:obj:`~.cudaMemAttachHost` or\n:py:obj:`~.cudaMemAttachSingle`. The default value for `flags` is\n:py:obj:`~.cudaMemAttachSingle` If the :py:obj:`~.cudaMemAttachGlobal`\nflag is specified, the memory can be accessed by any stream on any\ndevice. 
If the :py:obj:`~.cudaMemAttachHost` flag is specified, the\nprogram makes a guarantee that it won't access the memory on the device\nfrom any stream on a device that has a zero value for the device\nattribute :py:obj:`~.cudaDevAttrConcurrentManagedAccess`. If the\n:py:obj:`~.cudaMemAttachSingle` flag is specified and `stream` is\nassociated with a device that has a zero value for the device attribute\n:py:obj:`~.cudaDevAttrConcurr""entManagedAccess`, the program makes a\nguarantee that it will only access the memory on the device from\n`stream`. It is illegal to attach singly to the NULL stream, because\nthe NULL stream is a virtual global stream and not a specific stream.\nAn error will be returned in this case.\n\nWhen memory is associated with a single stream, the Unified Memory\nsystem will allow CPU access to this memory region so long as all\noperations in `stream` have completed, regardless of whether other\nstreams are active. In effect, this constrains exclusive ownership of\nthe managed memory region by an active GPU to per-stream activity\ninstead of whole-GPU activity.\n\nAccessing memory on the device from streams that are not associated\nwith it will produce undefined results. No error checking is performed\nby the Unified Memory system to ensure that kernels launched into other\nstreams do not access this region.\n\nIt is a program's responsibility to order calls to\n:py:obj:`~.cudaStreamAttachMemAsync` via events, synchronization or\nother means to ensure legal access to memory at all times. Data\nvisibility and coherency will be changed appropriately for all kernels\nwhich follow a stream-association change.\n\nIf `stream` is destroyed while data is associated with it, the\nassociation is removed and the association reverts to the default\nvisibility of the allocation as specified at\n:py:obj:`~.cudaMallocManaged`. For managed variables, the default\nassociation is always :py:obj:`~.cudaMemAttachGlobal`. 
Note that\ndestroying a stream is an asynchronous operation, and as a result, the\nchange to default association won't happen until all work in the stream\nhas completed.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream in which to enqueue the attach operation\ndevPtr : Any\n Pointer to memory (must be a pointer to managed memory or to a\n valid host-accessible region of system-allocated memory)\nlength : size_t\n Length"" of memory (defaults to zero)\nflags : unsigned int\n Must be one of :py:obj:`~.cudaMemAttachGlobal`,\n :py:obj:`~.cudaMemAttachHost` or :py:obj:`~.cudaMemAttachSingle`\n (defaults to :py:obj:`~.cudaMemAttachSingle`)\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorNotReady`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamCreateWithFlags`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamAddCallback`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cudaMallocManaged`, :py:obj:`~.cuStreamAttachMemAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_109cudaStreamAttachMemAsync = {"cudaStreamAttachMemAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_109cudaStreamAttachMemAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_108cudaStreamAttachMemAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_109cudaStreamAttachMemAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; PyObject *__pyx_v_devPtr = 0; size_t __pyx_v_length; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* 
values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamAttachMemAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_length,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 16978, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 16978, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16978, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16978, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16978, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamAttachMemAsync", 0) < (0)) __PYX_ERR(0, 16978, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { 
__Pyx_RaiseArgtupleInvalid("cudaStreamAttachMemAsync", 1, 4, 4, i); __PYX_ERR(0, 16978, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 16978, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 16978, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 16978, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 16978, __pyx_L3_error) } __pyx_v_stream = values[0]; __pyx_v_devPtr = values[1]; __pyx_v_length = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_length == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 16979, __pyx_L3_error) __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[3]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 16979, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamAttachMemAsync", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 16978, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamAttachMemAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_108cudaStreamAttachMemAsync(__pyx_self, __pyx_v_stream, __pyx_v_devPtr, __pyx_v_length, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { 
Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Cython-generated implementation of cuda.bindings.runtime.cudaStreamAttachMemAsync
 * (the interleaved star-comments below quote the originating runtime.pyx source,
 * lines ~17073-17084).  Flow, as the generated code shows:
 *   1. coerce `stream`: None -> 0; cudaStream_t / driver.CUstream -> int(stream);
 *      anything else -> int(cudaStream_t(stream));
 *   2. convert that Python int to a cudaStream_t via unsigned-long-long;
 *   3. wrap `devPtr` in _HelperInputVoidPtr and read its .cptr as a void*;
 *   4. call cyruntime.cudaStreamAttachMemAsync with the GIL released;
 *   5. return the 1-tuple (_dict_cudaError_t[err],).
 * NOTE(review): machine-generated Cython output -- regenerate from runtime.pyx
 * rather than editing by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_108cudaStreamAttachMemAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_devPtr, size_t __pyx_v_length, unsigned int __pyx_v_flags) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamAttachMemAsync", 0); /* "cuda/bindings/runtime.pyx":17073 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17074 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17073 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17075 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if
(!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17076 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17076, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17075 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17078 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17078, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17078, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":17079 * else: * pstream =
int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17079, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17080 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_devPtr}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17080, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17081 * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamAttachMemAsync(cystream, cydevPtr_ptr, length, flags) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17081, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17081, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17082 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamAttachMemAsync(cystream, cydevPtr_ptr, length, flags) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17083 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaStreamAttachMemAsync(cystream, cydevPtr_ptr, length, flags) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamAttachMemAsync(__pyx_v_cystream, __pyx_v_cydevPtr_ptr, __pyx_v_length, __pyx_v_flags); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17083, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":17082 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamAttachMemAsync(cystream, cydevPtr_ptr, length, flags) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":17084 * with nogil: * err = cyruntime.cudaStreamAttachMemAsync(cystream, cydevPtr_ptr, length, flags) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 17084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 17084, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":16978 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamAttachMemAsync(stream, devPtr, size_t length, unsigned int flags): * """ Attach memory to a stream asynchronously. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamAttachMemAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17086 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamBeginCapture(stream, mode not None : cudaStreamCaptureMode): * """ Begins graph capture on a stream.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_111cudaStreamBeginCapture(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_110cudaStreamBeginCapture, "cudaStreamBeginCapture(stream, mode: cudaStreamCaptureMode)\n\nBegins graph capture on a stream.\n\nBegin graph capture on `stream`. When a stream is in capture mode, all\noperations pushed into the stream will not be executed, but will\ninstead be captured into a graph, which will be returned via\n:py:obj:`~.cudaStreamEndCapture`. Capture may not be initiated if\n`stream` is :py:obj:`~.cudaStreamLegacy`. Capture must be ended on the\nsame stream in which it was initiated, and it may only be initiated if\nthe stream is not already in capture mode. The capture mode may be\nqueried via :py:obj:`~.cudaStreamIsCapturing`. A unique id representing\nthe capture sequence may be queried via\n:py:obj:`~.cudaStreamGetCaptureInfo`.\n\nIf `mode` is not :py:obj:`~.cudaStreamCaptureModeRelaxed`,\n:py:obj:`~.cudaStreamEndCapture` must be called on this stream from the\nsame thread.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream in which to initiate capture\nmode : :py:obj:`~.cudaStreamCaptureMode`\n Controls the interaction of this capture sequence with other API\n calls that are potentially unsafe. For more details see\n :py:obj:`~.cudaThreadExchangeStreamCaptureMode`.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamIsCapturing`, :py:obj:`~.cudaStreamEndCapture`, :py:obj:`~.cudaThreadExchangeStreamCaptureMode`\n\nNotes\n-----\nKernels captured using this API must not use texture and surface references. 
Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_111cudaStreamBeginCapture = {"cudaStreamBeginCapture", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_111cudaStreamBeginCapture, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_110cudaStreamBeginCapture}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_111cudaStreamBeginCapture(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; PyObject *__pyx_v_mode = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamBeginCapture (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,&__pyx_mstate_global->__pyx_n_u_mode,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17086, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17086, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17086, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamBeginCapture", 0) < (0)) __PYX_ERR(0, 17086, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamBeginCapture", 1, 2, 2, i); __PYX_ERR(0, 17086, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17086, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17086, __pyx_L3_error) } __pyx_v_stream = values[0]; __pyx_v_mode = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamBeginCapture", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 17086, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamBeginCapture", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_mode) == 
Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "mode"); __PYX_ERR(0, 17087, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_110cudaStreamBeginCapture(__pyx_self, __pyx_v_stream, __pyx_v_mode); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_110cudaStreamBeginCapture(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_mode) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; enum cudaStreamCaptureMode __pyx_v_cymode; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaStreamCaptureMode __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamBeginCapture", 0); /* "cuda/bindings/runtime.pyx":17128 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17129 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = 
__pyx_mstate_global->__pyx_int_0;
/* (cont.) body of the generated cudaStreamBeginCapture implementation:
 * the stream coercion mirrors cudaStreamAttachMemAsync above. */
/* "cuda/bindings/runtime.pyx":17128 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17130 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17131 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17130 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17133 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) |
(__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17133, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":17134 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17134, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17135 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamBeginCapture(cystream, cymode) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_mode, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17135, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaStreamCaptureMode)__Pyx_PyLong_As_enum__cudaStreamCaptureMode(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 17135, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cymode = __pyx_t_8; /* "cuda/bindings/runtime.pyx":17136 * cystream = pstream * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamBeginCapture(cystream, cymode) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /*
"cuda/bindings/runtime.pyx":17137 * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: * err = cyruntime.cudaStreamBeginCapture(cystream, cymode) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamBeginCapture(__pyx_v_cystream, __pyx_v_cymode); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17137, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":17136 * cystream = pstream * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamBeginCapture(cystream, cymode) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":17138 * with nogil: * err = cyruntime.cudaStreamBeginCapture(cystream, cymode) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17138, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 17138, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto
__pyx_L0; /* "cuda/bindings/runtime.pyx":17086 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamBeginCapture(stream, mode not None : cudaStreamCaptureMode): * """ Begins graph capture on a stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamBeginCapture", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17140 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamBeginCaptureToGraph(stream, graph, dependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], dependencyData : Optional[tuple[cudaGraphEdgeData] | list[cudaGraphEdgeData]], size_t numDependencies, mode not None : cudaStreamCaptureMode): * """ Begins graph capture on a stream to an existing graph. */ /* Python wrapper */
/* Cython-generated argument-unpacking wrapper for
 * cudaStreamBeginCaptureToGraph(stream, graph, dependencies, dependencyData,
 * numDependencies, mode).  Parses up to 6 positional/keyword arguments,
 * converts numDependencies to size_t, rejects mode=None, then delegates to
 * __pyx_pf_..._112cudaStreamBeginCaptureToGraph.
 * FIX(review): same dead keyword-count error check as in the
 * cudaStreamBeginCapture wrapper -- `unlikely(__pyx_kwds_len) < 0` collapses
 * the operand to 0/1 via !!, so the check never fired; corrected to
 * `unlikely(__pyx_kwds_len < 0)`. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_113cudaStreamBeginCaptureToGraph(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_112cudaStreamBeginCaptureToGraph, "cudaStreamBeginCaptureToGraph(stream, graph, dependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], dependencyData: Optional[tuple[cudaGraphEdgeData] | list[cudaGraphEdgeData]], size_t numDependencies, mode: cudaStreamCaptureMode)\n\nBegins graph capture on a stream to an existing graph.\n\nBegin graph capture on `stream`. When a stream is in capture mode, all\noperations pushed into the stream will not be executed, but will\ninstead be captured into `graph`, which will be returned via\n:py:obj:`~.cudaStreamEndCapture`.\n\nCapture may not be initiated if `stream` is\n:py:obj:`~.cudaStreamLegacy`. Capture must be ended on the same stream\nin which it was initiated, and it may only be initiated if the stream\nis not already in capture mode. The capture mode may be queried via\n:py:obj:`~.cudaStreamIsCapturing`. A unique id representing the capture\nsequence may be queried via :py:obj:`~.cudaStreamGetCaptureInfo`.\n\nIf `mode` is not :py:obj:`~.cudaStreamCaptureModeRelaxed`,\n:py:obj:`~.cudaStreamEndCapture` must be called on this stream from the\nsame thread.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream in which to initiate capture.\ngraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n Graph to capture into.\ndependencies : list[:py:obj:`~.cudaGraphNode_t`]\n Dependencies of the first node captured in the stream. Can be NULL\n if numDependencies is 0.\ndependencyData : list[:py:obj:`~.cudaGraphEdgeData`]\n Optional array of data associated with each dependency.\nnumDependencies : size_t\n Number of dependencies.\nmode : :py:obj:`~.cudaStreamCaptureMode`\n Controls the interaction of this capture sequence with other API\n calls that are potentially unsafe. For more details see\n :py:obj:`~.cudaThreadExchangeStreamCaptureMode`.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`,"" :py:obj:`~.cudaStreamIsCapturing`, :py:obj:`~.cudaStreamEndCapture`, :py:obj:`~.cudaThreadExchangeStreamCaptureMode`\n\nNotes\n-----\nKernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_113cudaStreamBeginCaptureToGraph = {"cudaStreamBeginCaptureToGraph", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_113cudaStreamBeginCaptureToGraph, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_112cudaStreamBeginCaptureToGraph}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_113cudaStreamBeginCaptureToGraph(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_stream = 0; PyObject *__pyx_v_graph = 0; PyObject *__pyx_v_dependencies = 0; PyObject *__pyx_v_dependencyData = 0; size_t __pyx_v_numDependencies; PyObject *__pyx_v_mode = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[6] = {0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamBeginCaptureToGraph (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,&__pyx_mstate_global->__pyx_n_u_graph_2,&__pyx_mstate_global->__pyx_n_u_dependencies,&__pyx_mstate_global->__pyx_n_u_dependencyData,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_mode,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ?
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 17140, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 17140, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 17140, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 17140, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17140, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17140, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17140, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamBeginCaptureToGraph", 0) < (0)) __PYX_ERR(0, 17140, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 6; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamBeginCaptureToGraph", 1, 6, 6, i); __PYX_ERR(0, 17140, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 6)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17140, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1]))
__PYX_ERR(0, 17140, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17140, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 17140, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 17140, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 17140, __pyx_L3_error) } __pyx_v_stream = values[0]; __pyx_v_graph = values[1]; __pyx_v_dependencies = values[2]; __pyx_v_dependencyData = values[3]; __pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 17141, __pyx_L3_error) __pyx_v_mode = values[5]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamBeginCaptureToGraph", 1, 6, 6, __pyx_nargs); __PYX_ERR(0, 17140, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamBeginCaptureToGraph", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_mode) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "mode"); __PYX_ERR(0, 17141, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_112cudaStreamBeginCaptureToGraph(__pyx_self, __pyx_v_stream, __pyx_v_graph, __pyx_v_dependencies, __pyx_v_dependencyData, __pyx_v_numDependencies, __pyx_v_mode); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp <
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_2generator78(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":17192 * """ * dependencyData = [] if dependencyData is None else dependencyData * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies */
/* Factory for the genexpr in the dependencyData isinstance-all() check:
 * allocates the closure scope, stores the iterable argument, and returns a
 * new generator object driven by ...generator78 below. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_78_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_78_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_78_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_78_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_78_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 17192, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); }
__pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_2generator78, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[78]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaStreamBeginCaptureToGraph_lo, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 17192, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamBeginCaptureToGraph.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_2generator78(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_78_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_78_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 
17192, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 17192, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17192, __pyx_L1_error) } /* Iterate the closed-over sequence: indexed fast path when it is exactly a list/tuple, generic iternext protocol otherwise. */ for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17192, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17192, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17192, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 17192, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); 
/* Loop body (cont.): each _x must be an instance of cudaGraphEdgeData, otherwise the genexpr result is Py_False. */ __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphEdgeData); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* NOTE(review): same Cython-generated genexpr pattern as above, this time for the 'dependencies' type check at runtime.pyx:17195. */ static PyObject *__pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_5generator79(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":17195 * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_3genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject 
*__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_79_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_79_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_79_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_79_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_79_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 17195, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } /* Store the iterable argument in the closure, then build the generator object bound to generator79. */ __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_5generator79, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[79]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaStreamBeginCaptureToGraph_lo, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 17195, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamBeginCaptureToGraph.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_5generator79(__pyx_CoroutineObject *__pyx_generator, 
CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_79_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_79_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } /* First (and only) resume: evaluate the inlined all(...) over the closed-over 'dependencies' sequence. */ __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 17195, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 17195, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17195, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17195, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17195, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17195, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 17195, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; /* Two-way isinstance check: _x must be a runtime cudaGraphNode_t or a driver CUgraphNode (short-circuit OR); a miss makes the genexpr result Py_False. */ __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L7_bool_binop_done; } __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_5 = __pyx_t_6; __pyx_L7_bool_binop_done:; __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; /* Error exit: release temporaries, then fold the pending exception into the generator's StopIteration handling and record a traceback. */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17140 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamBeginCaptureToGraph(stream, graph, dependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], dependencyData : Optional[tuple[cudaGraphEdgeData] | list[cudaGraphEdgeData]], size_t numDependencies, mode not None : cudaStreamCaptureMode): * """ Begins graph capture on a stream to an existing graph. 
*/ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_112cudaStreamBeginCaptureToGraph(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_graph, PyObject *__pyx_v_dependencies, PyObject *__pyx_v_dependencyData, size_t __pyx_v_numDependencies, PyObject *__pyx_v_mode) { cudaGraph_t __pyx_v_cygraph; PyObject *__pyx_v_pgraph = NULL; cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaGraphNode_t *__pyx_v_cydependencies; Py_ssize_t __pyx_v_idx; cudaGraphEdgeData *__pyx_v_cydependencyData; enum cudaStreamCaptureMode __pyx_v_cymode; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_2generator78 = 0; PyObject *__pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_5generator79 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaGraphEdgeData_st *__pyx_t_14; enum cudaStreamCaptureMode __pyx_t_15; cudaError_t __pyx_t_16; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamBeginCaptureToGraph", 0); __Pyx_INCREF(__pyx_v_dependencies); __Pyx_INCREF(__pyx_v_dependencyData); /* "cuda/bindings/runtime.pyx":17191 * Kernels captured using this API must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects. 
* """ * dependencyData = [] if dependencyData is None else dependencyData # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") */ __pyx_t_2 = (__pyx_v_dependencyData == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17191, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_dependencyData); __pyx_t_1 = __pyx_v_dependencyData; } __Pyx_DECREF_SET(__pyx_v_dependencyData, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":17192 * """ * dependencyData = [] if dependencyData is None else dependencyData * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_genexpr(NULL, __pyx_v_dependencyData); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17192, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":17193 * dependencyData = [] if dependencyData is None else dependencyData * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): * raise TypeError("Argument 'dependencyData' is not instance of type (expected 
tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") # <<<<<<<<<<<<<< * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_dependencyData_is_not_i}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17193, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 17193, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17192 * """ * dependencyData = [] if dependencyData is None else dependencyData * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies */ } /* "cuda/bindings/runtime.pyx":17194 * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_4 = (__pyx_v_dependencies == Py_None); if 
(__pyx_t_4) { __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17194, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __pyx_t_5; __pyx_t_5 = 0; } else { __Pyx_INCREF(__pyx_v_dependencies); __pyx_t_3 = __pyx_v_dependencies; } __Pyx_DECREF_SET(__pyx_v_dependencies, __pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17195 * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ __pyx_t_3 = __pyx_pf_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_3genexpr(NULL, __pyx_v_dependencies); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_Generator_GetInlinedResult(__pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 17195, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_2 = (!__pyx_t_4); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":17196 * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraph_t cygraph * if graph is None: */ __pyx_t_3 = NULL; 
__Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_1 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Argument_dependencies_is_not_ins}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 17196, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17195 * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ } /* "cuda/bindings/runtime.pyx":17198 * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_2 = (__pyx_v_graph == Py_None); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17199 * cdef cyruntime.cudaGraph_t cygraph * if graph is None: * pgraph = 0 # <<<<<<<<<<<<<< * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pgraph = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17198 * raise 
TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L5; } /* "cuda/bindings/runtime.pyx":17200 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ __pyx_t_4 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L6_bool_binop_done; } __pyx_t_4 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_2 = __pyx_t_4; __pyx_L6_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17201 * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) # <<<<<<<<<<<<<< * else: * pgraph = int(cudaGraph_t(graph)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_graph); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pgraph = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17200 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ goto __pyx_L5; } /* "cuda/bindings/runtime.pyx":17203 * pgraph = int(graph) * else: * pgraph = int(cudaGraph_t(graph)) # <<<<<<<<<<<<<< * cygraph = pgraph * cdef cyruntime.cudaStream_t cystream */ /*else*/ { __pyx_t_1 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_v_graph}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, 
__pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17203, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_3 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pgraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L5:; /* "cuda/bindings/runtime.pyx":17204 * else: * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph # <<<<<<<<<<<<<< * cdef cyruntime.cudaStream_t cystream * if stream is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pgraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17204, __pyx_L1_error) __pyx_v_cygraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17206 * cygraph = pgraph * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_2 = (__pyx_v_stream == Py_None); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17207 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17206 * cygraph = pgraph * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L8; } /* "cuda/bindings/runtime.pyx":17208 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_4 = 
__Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L9_bool_binop_done; } __pyx_t_4 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_2 = __pyx_t_4; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17209 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17208 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L8; } /* "cuda/bindings/runtime.pyx":17211 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17211, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_1 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17211, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = 
((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L8:; /* "cuda/bindings/runtime.pyx":17212 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17212, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17213 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL # <<<<<<<<<<<<<< * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cydependencies = NULL; /* "cuda/bindings/runtime.pyx":17214 * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: # <<<<<<<<<<<<<< * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17214, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17215 * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cydependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17215, __pyx_L1_error) __pyx_v_cydependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":17216 * if len(dependencies) > 1: * cydependencies = 
calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_2 = (__pyx_v_cydependencies == NULL); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":17217 * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(dependencies)): */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_5 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17217, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); 
__pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_9}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 17217, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17216 * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":17219 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(dependencies)): # <<<<<<<<<<<<<< * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17219, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":17220 * else: * for idx in range(len(dependencies)): * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(dependencies) == 1: * cydependencies = (dependencies[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_dependencies, 
__pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (__pyx_v_cydependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr[0])); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } /* "cuda/bindings/runtime.pyx":17214 * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: # <<<<<<<<<<<<<< * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: */ goto __pyx_L11; } /* "cuda/bindings/runtime.pyx":17221 * for idx in range(len(dependencies)): * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: # <<<<<<<<<<<<<< * cydependencies = (dependencies[0])._pvt_ptr * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17221, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 == 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17222 * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: * cydependencies = (dependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL * if len(dependencyData) > 1: */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_dependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cydependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":17221 * for idx in range(len(dependencies)): * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: # <<<<<<<<<<<<<< * cydependencies = (dependencies[0])._pvt_ptr * cdef 
cyruntime.cudaGraphEdgeData* cydependencyData = NULL */ } __pyx_L11:; /* "cuda/bindings/runtime.pyx":17223 * elif len(dependencies) == 1: * cydependencies = (dependencies[0])._pvt_ptr * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL # <<<<<<<<<<<<<< * if len(dependencyData) > 1: * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) */ __pyx_v_cydependencyData = NULL; /* "cuda/bindings/runtime.pyx":17224 * cydependencies = (dependencies[0])._pvt_ptr * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL * if len(dependencyData) > 1: # <<<<<<<<<<<<<< * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17224, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17225 * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL * if len(dependencyData) > 1: * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) # <<<<<<<<<<<<<< * if cydependencyData is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17225, __pyx_L1_error) __pyx_v_cydependencyData = ((cudaGraphEdgeData *)calloc(__pyx_t_8, (sizeof(cudaGraphEdgeData)))); /* "cuda/bindings/runtime.pyx":17226 * if len(dependencyData) > 1: * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) * for idx in range(len(dependencyData)): */ __pyx_t_2 = (__pyx_v_cydependencyData == NULL); if (unlikely(__pyx_t_2)) 
{ /* "cuda/bindings/runtime.pyx":17227 * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) # <<<<<<<<<<<<<< * for idx in range(len(dependencyData)): * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_9 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17227, __pyx_L1_error) __pyx_t_3 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphEdgeData))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; 
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 17227, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17226 * if len(dependencyData) > 1: * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) * for idx in range(len(dependencyData)): */ } /* "cuda/bindings/runtime.pyx":17228 * if cydependencyData is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) * for idx in range(len(dependencyData)): # <<<<<<<<<<<<<< * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) * elif len(dependencyData) == 1: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17228, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":17229 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) * for idx in range(len(dependencyData)): * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) # <<<<<<<<<<<<<< * elif len(dependencyData) == 1: * 
cydependencyData = (dependencyData[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_dependencyData, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17229, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (void)(memcpy((&(__pyx_v_cydependencyData[__pyx_v_idx])), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphEdgeData *)__pyx_t_1)->__pyx_base._pvt_ptr, (sizeof(cudaGraphEdgeData)))); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } /* "cuda/bindings/runtime.pyx":17224 * cydependencies = (dependencies[0])._pvt_ptr * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL * if len(dependencyData) > 1: # <<<<<<<<<<<<<< * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: */ goto __pyx_L15; } /* "cuda/bindings/runtime.pyx":17230 * for idx in range(len(dependencyData)): * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) * elif len(dependencyData) == 1: # <<<<<<<<<<<<<< * cydependencyData = (dependencyData[0])._pvt_ptr * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17230, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 == 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17231 * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) * elif len(dependencyData) == 1: * cydependencyData = (dependencyData[0])._pvt_ptr # <<<<<<<<<<<<<< * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_dependencyData, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 17231, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_14 = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphEdgeData *)__pyx_t_1)->__pyx_base._pvt_ptr; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydependencyData = __pyx_t_14; /* "cuda/bindings/runtime.pyx":17230 * for idx in range(len(dependencyData)): * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) * elif len(dependencyData) == 1: # <<<<<<<<<<<<<< * cydependencyData = (dependencyData[0])._pvt_ptr * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) */ } __pyx_L15:; /* "cuda/bindings/runtime.pyx":17232 * elif len(dependencyData) == 1: * cydependencyData = (dependencyData[0])._pvt_ptr * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) # <<<<<<<<<<<<<< * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17232, __pyx_L1_error) __pyx_t_2 = (__pyx_v_numDependencies > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_9 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_3 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17232, __pyx_L1_error) __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_9, __pyx_t_5}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 17232, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":17233 * cydependencyData = (dependencyData[0])._pvt_ptr * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamBeginCaptureToGraph(cystream, cygraph, cydependencies, cydependencyData, numDependencies, cymode) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_mode, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17233, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_15 = 
((enum cudaStreamCaptureMode)__Pyx_PyLong_As_enum__cudaStreamCaptureMode(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 17233, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cymode = __pyx_t_15; /* "cuda/bindings/runtime.pyx":17234 * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamBeginCaptureToGraph(cystream, cygraph, cydependencies, cydependencyData, numDependencies, cymode) * if len(dependencies) > 1 and cydependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17235 * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: * err = cyruntime.cudaStreamBeginCaptureToGraph(cystream, cygraph, cydependencies, cydependencyData, numDependencies, cymode) # <<<<<<<<<<<<<< * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) */ __pyx_t_16 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamBeginCaptureToGraph(__pyx_v_cystream, __pyx_v_cygraph, __pyx_v_cydependencies, __pyx_v_cydependencyData, __pyx_v_numDependencies, __pyx_v_cymode); if (unlikely(__pyx_t_16 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17235, __pyx_L21_error) __pyx_v_err = __pyx_t_16; } /* "cuda/bindings/runtime.pyx":17234 * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamBeginCaptureToGraph(cystream, cygraph, cydependencies, cydependencyData, numDependencies, cymode) * if len(dependencies) > 1 and cydependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); 
Py_BLOCK_THREADS goto __pyx_L22; } __pyx_L21_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L22:; } } /* "cuda/bindings/runtime.pyx":17236 * with nogil: * err = cyruntime.cudaStreamBeginCaptureToGraph(cystream, cygraph, cydependencies, cydependencyData, numDependencies, cymode) * if len(dependencies) > 1 and cydependencies is not NULL: # <<<<<<<<<<<<<< * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17236, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L24_bool_binop_done; } __pyx_t_4 = (__pyx_v_cydependencies != NULL); __pyx_t_2 = __pyx_t_4; __pyx_L24_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17237 * err = cyruntime.cudaStreamBeginCaptureToGraph(cystream, cygraph, cydependencies, cydependencyData, numDependencies, cymode) * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) # <<<<<<<<<<<<<< * if len(dependencyData) > 1 and cydependencyData is not NULL: * free(cydependencyData) */ free(__pyx_v_cydependencies); /* "cuda/bindings/runtime.pyx":17236 * with nogil: * err = cyruntime.cudaStreamBeginCaptureToGraph(cystream, cygraph, cydependencies, cydependencyData, numDependencies, cymode) * if len(dependencies) > 1 and cydependencies is not NULL: # <<<<<<<<<<<<<< * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: */ } /* "cuda/bindings/runtime.pyx":17238 * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: # <<<<<<<<<<<<<< * free(cydependencyData) * return (_dict_cudaError_t[err],) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17238, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if 
(__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L27_bool_binop_done; } __pyx_t_4 = (__pyx_v_cydependencyData != NULL); __pyx_t_2 = __pyx_t_4; __pyx_L27_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17239 * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: * free(cydependencyData) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ free(__pyx_v_cydependencyData); /* "cuda/bindings/runtime.pyx":17238 * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: # <<<<<<<<<<<<<< * free(cydependencyData) * return (_dict_cudaError_t[err],) */ } /* "cuda/bindings/runtime.pyx":17240 * if len(dependencyData) > 1 and cydependencyData is not NULL: * free(cydependencyData) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 17240, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17140 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamBeginCaptureToGraph(stream, graph, dependencies : 
Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], dependencyData : Optional[tuple[cudaGraphEdgeData] | list[cudaGraphEdgeData]], size_t numDependencies, mode not None : cudaStreamCaptureMode): * """ Begins graph capture on a stream to an existing graph. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamBeginCaptureToGraph", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pgraph); __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_2generator78); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_29cudaStreamBeginCaptureToGraph_5generator79); __Pyx_XDECREF(__pyx_v_dependencies); __Pyx_XDECREF(__pyx_v_dependencyData); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17242 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaThreadExchangeStreamCaptureMode(mode not None : cudaStreamCaptureMode): * """ Swaps the stream capture interaction mode for a thread. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_115cudaThreadExchangeStreamCaptureMode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_114cudaThreadExchangeStreamCaptureMode, "cudaThreadExchangeStreamCaptureMode(mode: cudaStreamCaptureMode)\n\nSwaps the stream capture interaction mode for a thread.\n\nSets the calling thread's stream capture interaction mode to the value\ncontained in `*mode`, and overwrites `*mode` with the previous mode for\nthe thread. 
To facilitate deterministic behavior across function or\nmodule boundaries, callers are encouraged to use this API in a push-pop\nfashion:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nDuring stream capture (see :py:obj:`~.cudaStreamBeginCapture`), some\nactions, such as a call to :py:obj:`~.cudaMalloc`, may be unsafe. In\nthe case of :py:obj:`~.cudaMalloc`, the operation is not enqueued\nasynchronously to a stream, and is not observed by stream capture.\nTherefore, if the sequence of operations captured via\n:py:obj:`~.cudaStreamBeginCapture` depended on the allocation being\nreplayed whenever the graph is launched, the captured graph would be\ninvalid.\n\nTherefore, stream capture places restrictions on API calls that can be\nmade within or concurrently to a\n:py:obj:`~.cudaStreamBeginCapture`-:py:obj:`~.cudaStreamEndCapture`\nsequence. This behavior can be controlled via this API and flags to\n:py:obj:`~.cudaStreamBeginCapture`.\n\nA thread's mode is one of the following:\n\n- `cudaStreamCaptureModeGlobal:` This is the default mode. If the local\n thread has an ongoing capture sequence that was not initiated with\n `cudaStreamCaptureModeRelaxed` at `cuStreamBeginCapture`, or if any\n other thread has a concurrent capture sequence initiated with\n `cudaStreamCaptureModeGlobal`, this thread is prohibited from\n potentially unsafe API calls.\n\n- `cudaStreamCaptureModeThreadLocal:` If the local thread has an\n ongoing capture sequence not initiated with\n `cudaStreamCaptureModeRelaxed`, it is prohibited from potentially\n unsafe API calls. Concurrent capture sequences in other threads are\n ignored.\n\n- `cudaStreamCaptureModeRelaxed:` The local thread is not ""prohibited\n from potentially unsafe API calls. 
Note that the thread is still\n prohibited from API calls which necessarily conflict with stream\n capture, for example, attempting :py:obj:`~.cudaEventQuery` on an\n event that was last recorded inside a capture sequence.\n\nParameters\n----------\nmode : :py:obj:`~.cudaStreamCaptureMode`\n Pointer to mode value to swap with the current mode\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\nmode : :py:obj:`~.cudaStreamCaptureMode`\n Pointer to mode value to swap with the current mode\n\nSee Also\n--------\n:py:obj:`~.cudaStreamBeginCapture`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_115cudaThreadExchangeStreamCaptureMode = {"cudaThreadExchangeStreamCaptureMode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_115cudaThreadExchangeStreamCaptureMode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_114cudaThreadExchangeStreamCaptureMode}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_115cudaThreadExchangeStreamCaptureMode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_mode = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaThreadExchangeStreamCaptureMode (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_mode,0}; const 
Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17242, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17242, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaThreadExchangeStreamCaptureMode", 0) < (0)) __PYX_ERR(0, 17242, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaThreadExchangeStreamCaptureMode", 1, 1, 1, i); __PYX_ERR(0, 17242, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17242, __pyx_L3_error) } __pyx_v_mode = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaThreadExchangeStreamCaptureMode", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17242, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaThreadExchangeStreamCaptureMode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_mode) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "mode"); __PYX_ERR(0, 17243, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_114cudaThreadExchangeStreamCaptureMode(__pyx_self, __pyx_v_mode); /* function 
exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_114cudaThreadExchangeStreamCaptureMode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mode) { enum cudaStreamCaptureMode __pyx_v_cymode; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaStreamCaptureMode __pyx_t_2; cudaError_t __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; size_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaThreadExchangeStreamCaptureMode", 0); /* "cuda/bindings/runtime.pyx":17306 * :py:obj:`~.cudaStreamBeginCapture` * """ * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaThreadExchangeStreamCaptureMode(&cymode) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_mode, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaStreamCaptureMode)__Pyx_PyLong_As_enum__cudaStreamCaptureMode(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 17306, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cymode = __pyx_t_2; /* "cuda/bindings/runtime.pyx":17307 * """ * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaThreadExchangeStreamCaptureMode(&cymode) * if err != cyruntime.cudaSuccess: */ { 
PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17308 * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: * err = cyruntime.cudaThreadExchangeStreamCaptureMode(&cymode) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaThreadExchangeStreamCaptureMode((&__pyx_v_cymode)); if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17308, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":17307 * """ * cdef cyruntime.cudaStreamCaptureMode cymode = mode.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaThreadExchangeStreamCaptureMode(&cymode) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":17309 * with nogil: * err = cyruntime.cudaThreadExchangeStreamCaptureMode(&cymode) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaStreamCaptureMode(cymode)) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":17310 * err = cyruntime.cudaThreadExchangeStreamCaptureMode(&cymode) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], cudaStreamCaptureMode(cymode)) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
__pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17310, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 17310, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 17310, __pyx_L1_error); __pyx_t_6 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17309 * with nogil: * err = cyruntime.cudaThreadExchangeStreamCaptureMode(&cymode) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaStreamCaptureMode(cymode)) */ } /* "cuda/bindings/runtime.pyx":17311 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaStreamCaptureMode(cymode)) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_t_5 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_mstate_global->__pyx_n_u_cudaStreamCaptureMode); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = 
__Pyx_PyLong_From_enum__cudaStreamCaptureMode(__pyx_v_cymode); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 17311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_7); assert(__pyx_t_5); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_7, __pyx__function); __pyx_t_9 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_8}; __pyx_t_6 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+__pyx_t_9, (2-__pyx_t_9) | (__pyx_t_9*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 17311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); } __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 17311, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 17311, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_6); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_6) != (0)) __PYX_ERR(0, 17311, __pyx_L1_error); __pyx_t_1 = 0; __pyx_t_6 = 0; __pyx_r = __pyx_t_7; __pyx_t_7 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17242 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaThreadExchangeStreamCaptureMode(mode not None : cudaStreamCaptureMode): * """ Swaps the stream capture interaction mode for a thread. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("cuda.bindings.runtime.cudaThreadExchangeStreamCaptureMode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17313 * return (_dict_cudaError_t[err], cudaStreamCaptureMode(cymode)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamEndCapture(stream): * """ Ends capture on a stream, returning the captured graph. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_117cudaStreamEndCapture(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_116cudaStreamEndCapture, "cudaStreamEndCapture(stream)\n\nEnds capture on a stream, returning the captured graph.\n\nEnd capture on `stream`, returning the captured graph via `pGraph`.\nCapture must have been initiated on `stream` via a call to\n:py:obj:`~.cudaStreamBeginCapture`. 
If capture was invalidated, due to\na violation of the rules of stream capture, then a NULL graph will be\nreturned.\n\nIf the `mode` argument to :py:obj:`~.cudaStreamBeginCapture` was not\n:py:obj:`~.cudaStreamCaptureModeRelaxed`, this call must be from the\nsame thread as :py:obj:`~.cudaStreamBeginCapture`.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream to query\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorStreamCaptureWrongThread`\npGraph : :py:obj:`~.cudaGraph_t`\n The captured graph\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamIsCapturing`, :py:obj:`~.cudaGraphDestroy`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_117cudaStreamEndCapture = {"cudaStreamEndCapture", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_117cudaStreamEndCapture, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_116cudaStreamEndCapture}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_117cudaStreamEndCapture(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamEndCapture (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { 
/* NOTE(review): Cython-generated argument-parsing prologue — accepts exactly one positional-or-keyword argument named "stream"; all failure paths release any parsed argument refs and jump to __pyx_L3_error. Do not hand-edit: regenerate from runtime.pyx. */
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17313, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17313, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamEndCapture", 0) < (0)) __PYX_ERR(0, 17313, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamEndCapture", 1, 1, 1, i); __PYX_ERR(0, 17313, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17313, __pyx_L3_error) } __pyx_v_stream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamEndCapture", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17313, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamEndCapture", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_116cudaStreamEndCapture(__pyx_self, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
/* Normal-exit cleanup: the parsed argument references were released by the loop above; finish the RefNanny context and hand back the impl's result. */
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* Impl of cudaStreamEndCapture: coerces `stream` (None -> 0; cudaStream_t/driver.CUstream -> int(stream); anything else -> int(cudaStream_t(stream))) into a cyruntime.cudaStream_t, allocates a cudaGraph_t wrapper, invokes cyruntime.cudaStreamEndCapture with the GIL released, and returns (err, None) on failure or (err, pGraph) on success. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_116cudaStreamEndCapture(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *__pyx_v_pGraph = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamEndCapture", 0); /* "cuda/bindings/runtime.pyx":17344 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17345 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17344 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17346 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, 
__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17347 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17347, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17346 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17349 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cudaGraph_t pGraph = cudaGraph_t() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17349, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17349, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":17350 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: */ __pyx_t_7 = 
__Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17350, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17351 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cdef cudaGraph_t pGraph = cudaGraph_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamEndCapture(cystream, pGraph._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17351, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_pGraph = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17352 * cystream = pstream * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamEndCapture(cystream, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17353 * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: * err = cyruntime.cudaStreamEndCapture(cystream, pGraph._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamEndCapture(__pyx_v_cystream, ((cudaGraph_t *)__pyx_v_pGraph->__pyx_base._pvt_ptr)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && 
__Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17353, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":17352 * cystream = pstream * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamEndCapture(cystream, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":17354 * with nogil: * err = cyruntime.cudaStreamEndCapture(cystream, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraph) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17355 * err = cyruntime.cudaStreamEndCapture(cystream, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraph) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17355, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17355, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17355, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17355, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 17355, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 
17355, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17354 * with nogil: * err = cyruntime.cudaStreamEndCapture(cystream, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraph) */ } /* "cuda/bindings/runtime.pyx":17356 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraph) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17356, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17356, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17356, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17356, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 17356, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraph); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraph); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_pGraph)) != (0)) __PYX_ERR(0, 17356, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17313 * return (_dict_cudaError_t[err], cudaStreamCaptureMode(cymode)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamEndCapture(stream): * """ Ends capture on a stream, returning the captured graph. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamEndCapture", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_pGraph); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17358 * return (_dict_cudaError_t[err], pGraph) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamIsCapturing(stream): * """ Returns a stream's capture status. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_119cudaStreamIsCapturing(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_118cudaStreamIsCapturing, "cudaStreamIsCapturing(stream)\n\nReturns a stream's capture status.\n\nReturn the capture status of `stream` via `pCaptureStatus`. After a\nsuccessful call, `*pCaptureStatus` will contain one of the following:\n\n- :py:obj:`~.cudaStreamCaptureStatusNone`: The stream is not capturing.\n\n- :py:obj:`~.cudaStreamCaptureStatusActive`: The stream is capturing.\n\n- :py:obj:`~.cudaStreamCaptureStatusInvalidated`: The stream was\n capturing but an error has invalidated the capture sequence. The\n capture sequence must be terminated with\n :py:obj:`~.cudaStreamEndCapture` on the stream where it was initiated\n in order to continue using `stream`.\n\nNote that, if this is called on :py:obj:`~.cudaStreamLegacy` (the \"null\nstream\") while a blocking stream on the same device is capturing, it\nwill return :py:obj:`~.cudaErrorStreamCaptureImplicit` and\n`*pCaptureStatus` is unspecified after the call. 
The blocking stream\ncapture is not invalidated.\n\nWhen a blocking stream is capturing, the legacy stream is in an\nunusable state until the blocking stream capture is terminated. The\nlegacy stream is not supported for stream capture, but attempted use\nwould have an implicit dependency on the capturing stream(s).\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream to query\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorStreamCaptureImplicit`\npCaptureStatus : :py:obj:`~.cudaStreamCaptureStatus`\n Returns the stream's capture status\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamEndCapture`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_119cudaStreamIsCapturing = {"cudaStreamIsCapturing", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_119cudaStreamIsCapturing, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_118cudaStreamIsCapturing}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_119cudaStreamIsCapturing(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamIsCapturing (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, 
__pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17358, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17358, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamIsCapturing", 0) < (0)) __PYX_ERR(0, 17358, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamIsCapturing", 1, 1, 1, i); __PYX_ERR(0, 17358, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17358, __pyx_L3_error) } __pyx_v_stream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamIsCapturing", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17358, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamIsCapturing", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_118cudaStreamIsCapturing(__pyx_self, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { 
/* Normal-exit cleanup: release the argument references captured during parsing now that the impl has returned. Generated by Cython — regenerate from runtime.pyx rather than hand-editing. */
Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Impl of cudaStreamIsCapturing: coerces `stream` (None -> 0; cudaStream_t/driver.CUstream -> int(stream); anything else -> int(cudaStream_t(stream))) into a cyruntime.cudaStream_t, calls cyruntime.cudaStreamIsCapturing with the GIL released, and returns (err, None) on failure or (err, cudaStreamCaptureStatus(pCaptureStatus)) on success. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_118cudaStreamIsCapturing(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; enum cudaStreamCaptureStatus __pyx_v_pCaptureStatus; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamIsCapturing", 0); /* "cuda/bindings/runtime.pyx":17403 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17404 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17403 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17405 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = 
__Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17406 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17406, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17405 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17408 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaStreamCaptureStatus pCaptureStatus */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17408, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":17409 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef 
cyruntime.cudaStreamCaptureStatus pCaptureStatus * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17409, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17411 * cystream = pstream * cdef cyruntime.cudaStreamCaptureStatus pCaptureStatus * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamIsCapturing(cystream, &pCaptureStatus) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17412 * cdef cyruntime.cudaStreamCaptureStatus pCaptureStatus * with nogil: * err = cyruntime.cudaStreamIsCapturing(cystream, &pCaptureStatus) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamIsCapturing(__pyx_v_cystream, (&__pyx_v_pCaptureStatus)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17412, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":17411 * cystream = pstream * cdef cyruntime.cudaStreamCaptureStatus pCaptureStatus * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamIsCapturing(cystream, &pCaptureStatus) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":17413 * with nogil: * err = cyruntime.cudaStreamIsCapturing(cystream, &pCaptureStatus) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(pCaptureStatus)) */ __pyx_t_1 = (__pyx_v_err != 
cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17414 * err = cyruntime.cudaStreamIsCapturing(cystream, &pCaptureStatus) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(pCaptureStatus)) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 17414, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 17414, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17413 * with nogil: * err = cyruntime.cudaStreamIsCapturing(cystream, &pCaptureStatus) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(pCaptureStatus)) */ } /* "cuda/bindings/runtime.pyx":17415 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(pCaptureStatus)) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 17415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_cudaStreamCaptureStatus); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyLong_From_enum__cudaStreamCaptureStatus(__pyx_v_pCaptureStatus); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_9); assert(__pyx_t_3); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_9, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_10}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 17415, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 17415, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* 
"cuda/bindings/runtime.pyx":17358 * return (_dict_cudaError_t[err], pGraph) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamIsCapturing(stream): * """ Returns a stream's capture status. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamIsCapturing", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17417 * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(pCaptureStatus)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetCaptureInfo(stream): * """ Query a stream's capture state. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_121cudaStreamGetCaptureInfo(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_120cudaStreamGetCaptureInfo, "cudaStreamGetCaptureInfo(stream)\n\nQuery a stream's capture state.\n\nQuery stream state related to stream capture.\n\nIf called on :py:obj:`~.cudaStreamLegacy` (the \"null stream\") while a\nstream not created with :py:obj:`~.cudaStreamNonBlocking` is capturing,\nreturns :py:obj:`~.cudaErrorStreamCaptureImplicit`.\n\nValid data (other than capture status) is returned only if both of the\nfollowing are true:\n\n- the call returns cudaSuccess\n\n- the returned capture status is\n :py:obj:`~.cudaStreamCaptureStatusActive`\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n The stream to query\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, 
:py:obj:`~.cudaErrorStreamCaptureImplicit`\ncaptureStatus_out : :py:obj:`~.cudaStreamCaptureStatus`\n Location to return the capture status of the stream; required\nid_out : unsigned long long\n Optional location to return an id for the capture sequence, which\n is unique over the lifetime of the process\ngraph_out : :py:obj:`~.cudaGraph_t`\n Optional location to return the graph being captured into. All\n operations other than destroy and node removal are permitted on the\n graph while the capture sequence is in progress. This API does not\n transfer ownership of the graph, which is transferred or destroyed\n at :py:obj:`~.cudaStreamEndCapture`. Note that the graph handle may\n be invalidated before end of capture for certain errors. Nodes that\n are or become unreachable from the original stream at\n :py:obj:`~.cudaStreamEndCapture` due to direct actions on the graph\n do not trigger :py:obj:`~.cudaErrorStreamCaptureUnjoined`.\ndependencies_out : list[:py:obj:`~.cudaGraphNode_t`]\n Optional location to store a pointer to an array of nodes. The next\n node to be captured in the stream will depend on this set of nodes,\n absent operations such as event wait which modify this set. The\n array point""er is valid until the next API call which operates on\n the stream or until the capture is terminated. 
The node handles may\n be copied out and are valid until they or the graph is destroyed.\n The driver-owned array may also be passed directly to APIs that\n operate on the graph (not the stream) without copying.\nnumDependencies_out : int\n Optional location to store the size of the array returned in\n dependencies_out.\n\nSee Also\n--------\n:py:obj:`~.cudaStreamGetCaptureInfo_v3`, :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamIsCapturing`, :py:obj:`~.cudaStreamUpdateCaptureDependencies`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_121cudaStreamGetCaptureInfo = {"cudaStreamGetCaptureInfo", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_121cudaStreamGetCaptureInfo, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_120cudaStreamGetCaptureInfo}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_121cudaStreamGetCaptureInfo(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamGetCaptureInfo (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17417, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17417, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamGetCaptureInfo", 0) < (0)) __PYX_ERR(0, 17417, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamGetCaptureInfo", 1, 1, 1, i); __PYX_ERR(0, 17417, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17417, __pyx_L3_error) } __pyx_v_stream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamGetCaptureInfo", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17417, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetCaptureInfo", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_120cudaStreamGetCaptureInfo(__pyx_self, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
/* NOTE(review): Cython-generated implementation of cudaStreamGetCaptureInfo(stream). */
/* Flow: normalize `stream` to an integer handle (None -> 0; cudaStream_t / driver.CUstream */
/* -> int(stream); anything else -> int(cudaStream_t(stream))), call the cyruntime C entry */
/* point with the GIL released, wrap the returned dependency-node array in a Python list of */
/* cudaGraphNode_t when err == 0, and return a 6-tuple (err, captureStatus_out, id_out, */
/* graph_out, pydependencies_out, numDependencies_out) -- or (err, None x5) on failure. */
/* Do not edit by hand; regenerate from the .pyx source. */
*__pyx_pf_4cuda_8bindings_7runtime_120cudaStreamGetCaptureInfo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; enum cudaStreamCaptureStatus __pyx_v_captureStatus_out; unsigned PY_LONG_LONG __pyx_v_id_out; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *__pyx_v_graph_out = 0; cudaGraphNode_t const *__pyx_v_cydependencies_out; PyObject *__pyx_v_pydependencies_out = NULL; size_t __pyx_v_numDependencies_out; cudaError_t __pyx_v_err; size_t __pyx_10genexpr191__pyx_v_idx; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; size_t __pyx_t_11; PyObject *__pyx_t_12 = NULL; size_t __pyx_t_13; PyObject *__pyx_t_14 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamGetCaptureInfo", 0); /* "cuda/bindings/runtime.pyx":17477 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17478 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17477 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17479 * if stream is None: * pstream = 0 * elif isinstance(stream, 
(cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17480 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17480, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17479 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17482 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaStreamCaptureStatus captureStatus_out */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17482, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17482, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":17483 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaStreamCaptureStatus captureStatus_out * cdef unsigned long long id_out = 0 */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17483, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17485 * cystream = pstream * cdef cyruntime.cudaStreamCaptureStatus captureStatus_out * cdef unsigned long long id_out = 0 # <<<<<<<<<<<<<< * cdef cudaGraph_t graph_out = cudaGraph_t() * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL */ __pyx_v_id_out = 0; /* "cuda/bindings/runtime.pyx":17486 * cdef cyruntime.cudaStreamCaptureStatus captureStatus_out * cdef unsigned long long id_out = 0 * cdef cudaGraph_t graph_out = cudaGraph_t() # <<<<<<<<<<<<<< * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL * pydependencies_out = [] */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17486, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_graph_out = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17487 * cdef 
unsigned long long id_out = 0 * cdef cudaGraph_t graph_out = cudaGraph_t() * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL # <<<<<<<<<<<<<< * pydependencies_out = [] * cdef size_t numDependencies_out = 0 */ __pyx_v_cydependencies_out = NULL; /* "cuda/bindings/runtime.pyx":17488 * cdef cudaGraph_t graph_out = cudaGraph_t() * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL * pydependencies_out = [] # <<<<<<<<<<<<<< * cdef size_t numDependencies_out = 0 * with nogil: */ __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pydependencies_out = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17489 * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL * pydependencies_out = [] * cdef size_t numDependencies_out = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamGetCaptureInfo(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &numDependencies_out) */ __pyx_v_numDependencies_out = 0; /* "cuda/bindings/runtime.pyx":17490 * pydependencies_out = [] * cdef size_t numDependencies_out = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetCaptureInfo(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17491 * cdef size_t numDependencies_out = 0 * with nogil: * err = cyruntime.cudaStreamGetCaptureInfo(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &numDependencies_out) # <<<<<<<<<<<<<< * if cudaError_t(err) == cudaError_t(0): * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamGetCaptureInfo(__pyx_v_cystream, 
(&__pyx_v_captureStatus_out), (&__pyx_v_id_out), ((cudaGraph_t *)__pyx_v_graph_out->__pyx_base._pvt_ptr), (&__pyx_v_cydependencies_out), (&__pyx_v_numDependencies_out)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17491, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":17490 * pydependencies_out = [] * cdef size_t numDependencies_out = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetCaptureInfo(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":17492 * with nogil: * err = cyruntime.cudaStreamGetCaptureInfo(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): # <<<<<<<<<<<<<< * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: */ __pyx_t_4 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17492, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17492, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); assert(__pyx_t_4); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_3, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_9}; __pyx_t_5 = 
__Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17492, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __pyx_t_9 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17492, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_4); assert(__pyx_t_9); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_4, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_9, __pyx_mstate_global->__pyx_int_0}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17492, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __pyx_t_4 = PyObject_RichCompare(__pyx_t_5, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17492, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 17492, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17493 * err = cyruntime.cudaStreamGetCaptureInfo(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] # 
<<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None, None, None, None) */ { /* enter inner scope */ __pyx_t_4 = PyList_New(0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17493, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __pyx_v_numDependencies_out; __pyx_t_10 = __pyx_t_6; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_10genexpr191__pyx_v_idx = __pyx_t_11; __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_9 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_12 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(__pyx_v_cydependencies_out[__pyx_10genexpr191__pyx_v_idx]))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17493, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_13 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, NULL}; __pyx_t_14 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17493, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_init_value, __pyx_t_12, __pyx_t_14, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 17493, __pyx_L1_error) __pyx_t_3 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_9, __pyx_callargs+__pyx_t_13, (1-__pyx_t_13) | (__pyx_t_13*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_14); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17493, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } if (unlikely(__Pyx_ListComp_Append(__pyx_t_4, (PyObject*)__pyx_t_3))) __PYX_ERR(0, 17493, __pyx_L1_error) __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; } } /* exit inner scope */ __Pyx_DECREF_SET(__pyx_v_pydependencies_out, ((PyObject*)__pyx_t_4)); 
/* pydependencies_out now holds cudaGraphNode_t wrappers for the driver-owned node array */
__pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":17492 * with nogil: * err = cyruntime.cudaStreamGetCaptureInfo(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): # <<<<<<<<<<<<<< * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":17494 * if cudaError_t(err) == cudaError_t(0): * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None, None, None, None) * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, numDependencies_out) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17495 * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None, None, None, None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, numDependencies_out) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17495, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17495, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17495, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17495, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 17495, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 17495, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, Py_None) != (0)) __PYX_ERR(0, 17495, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 3, Py_None) != (0)) __PYX_ERR(0, 17495, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 4, Py_None) != (0)) __PYX_ERR(0, 17495, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 5, Py_None) != (0)) __PYX_ERR(0, 17495, __pyx_L1_error); __pyx_t_9 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17494 * if cudaError_t(err) == cudaError_t(0): * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None, None, None, None) * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, numDependencies_out) */ } /* "cuda/bindings/runtime.pyx":17496 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None, None, None, None) * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, numDependencies_out) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(0, 17496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_3 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_14, __pyx_mstate_global->__pyx_n_u_cudaStreamCaptureStatus); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_12 = __Pyx_PyLong_From_enum__cudaStreamCaptureStatus(__pyx_v_captureStatus_out); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_14))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_14); assert(__pyx_t_3); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_14); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_14, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_12}; __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_14, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); } __pyx_t_14 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_id_out); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_12 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies_out); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_3 = PyTuple_New(6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17496, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 17496, __pyx_L1_error); 
__Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 17496, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_14); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_14) != (0)) __PYX_ERR(0, 17496, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_graph_out); __Pyx_GIVEREF((PyObject *)__pyx_v_graph_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 3, ((PyObject *)__pyx_v_graph_out)) != (0)) __PYX_ERR(0, 17496, __pyx_L1_error); __Pyx_INCREF(__pyx_v_pydependencies_out); __Pyx_GIVEREF(__pyx_v_pydependencies_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 4, __pyx_v_pydependencies_out) != (0)) __PYX_ERR(0, 17496, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_12); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 5, __pyx_t_12) != (0)) __PYX_ERR(0, 17496, __pyx_L1_error); __pyx_t_4 = 0; __pyx_t_9 = 0; __pyx_t_14 = 0; __pyx_t_12 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17417 * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(pCaptureStatus)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetCaptureInfo(stream): * """ Query a stream's capture state. 
*/
/* NOTE(review): Cython-generated -- (1) error/exit epilogue of the cudaStreamGetCaptureInfo */
/* implementation above (traceback + temporary/local reference cleanup), then (2) the Python */
/* wrapper for cudaStreamGetCaptureInfo_v3: prototype, PyDoc_STRVAR docstring, PyMethodDef */
/* entry, and unpacking of the single positional-or-keyword "stream" argument. Do not edit */
/* by hand; regenerate from the .pyx source. */
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_12); __Pyx_XDECREF(__pyx_t_14); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetCaptureInfo", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_graph_out); __Pyx_XDECREF(__pyx_v_pydependencies_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17498 * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, numDependencies_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetCaptureInfo_v3(stream): * """ Query a stream's capture state (12.3+) */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_123cudaStreamGetCaptureInfo_v3(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_122cudaStreamGetCaptureInfo_v3, "cudaStreamGetCaptureInfo_v3(stream)\n\nQuery a stream's capture state (12.3+)\n\nQuery stream state related to stream capture.\n\nIf called on :py:obj:`~.cudaStreamLegacy` (the \"null stream\") while a\nstream not created with :py:obj:`~.cudaStreamNonBlocking` is capturing,\nreturns :py:obj:`~.cudaErrorStreamCaptureImplicit`.\n\nValid data (other than capture status) is returned only if both of the\nfollowing are true:\n\n- the call returns cudaSuccess\n\n- the returned capture status is\n :py:obj:`~.cudaStreamCaptureStatusActive`\n\nIf `edgeData_out` is non-NULL then `dependencies_out` must be as well.\nIf `dependencies_out` is non-NULL and `edgeData_out` is NULL, but there\nis non-zero edge data for one or more of the current stream\ndependencies, the call 
will return :py:obj:`~.cudaErrorLossyQuery`.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n The stream to query\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorStreamCaptureImplicit`, :py:obj:`~.cudaErrorLossyQuery`\ncaptureStatus_out : :py:obj:`~.cudaStreamCaptureStatus`\n Location to return the capture status of the stream; required\nid_out : unsigned long long\n Optional location to return an id for the capture sequence, which\n is unique over the lifetime of the process\ngraph_out : :py:obj:`~.cudaGraph_t`\n Optional location to return the graph being captured into. All\n operations other than destroy and node removal are permitted on the\n graph while the capture sequence is in progress. This API does not\n transfer ownership of the graph, which is transferred or destroyed\n at :py:obj:`~.cudaStreamEndCapture`. Note that the graph handle may\n be invalidated before end of capture for certain errors. Nodes that\n are or become unreachable from the original stream at\n :py:obj:`~.cudaStreamEndCapture` due to direct actions on the graph\n do not trigger :py:obj:`""~.cudaErrorStreamCaptureUnjoined`.\ndependencies_out : list[:py:obj:`~.cudaGraphNode_t`]\n Optional location to store a pointer to an array of nodes. The next\n node to be captured in the stream will depend on this set of nodes,\n absent operations such as event wait which modify this set. The\n array pointer is valid until the next API call which operates on\n the stream or until the capture is terminated. The node handles may\n be copied out and are valid until they or the graph is destroyed.\n The driver-owned array may also be passed directly to APIs that\n operate on the graph (not the stream) without copying.\nedgeData_out : list[:py:obj:`~.cudaGraphEdgeData`]\n Optional location to store a pointer to an array of graph edge\n data. 
This array parallels `dependencies_out`; the next node to be\n added has an edge to `dependencies_out`[i] with annotation\n `edgeData_out`[i] for each `i`. The array pointer is valid until\n the next API call which operates on the stream or until the capture\n is terminated.\nnumDependencies_out : int\n Optional location to store the size of the array returned in\n dependencies_out.\n\nSee Also\n--------\n:py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamIsCapturing`, :py:obj:`~.cudaStreamUpdateCaptureDependencies`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_123cudaStreamGetCaptureInfo_v3 = {"cudaStreamGetCaptureInfo_v3", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_123cudaStreamGetCaptureInfo_v3, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_122cudaStreamGetCaptureInfo_v3}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_123cudaStreamGetCaptureInfo_v3(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamGetCaptureInfo_v3 (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17498, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17498, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamGetCaptureInfo_v3", 0) < (0)) __PYX_ERR(0, 17498, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamGetCaptureInfo_v3", 1, 1, 1, i); __PYX_ERR(0, 17498, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17498, __pyx_L3_error) } __pyx_v_stream = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamGetCaptureInfo_v3", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17498, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetCaptureInfo_v3", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; 
/* Argument unpacking succeeded: hand off to the implementation function. */
__pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_122cudaStreamGetCaptureInfo_v3(__pyx_self, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject
*__pyx_pf_4cuda_8bindings_7runtime_122cudaStreamGetCaptureInfo_v3(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; enum cudaStreamCaptureStatus __pyx_v_captureStatus_out; unsigned PY_LONG_LONG __pyx_v_id_out; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *__pyx_v_graph_out = 0; cudaGraphNode_t const *__pyx_v_cydependencies_out; PyObject *__pyx_v_pydependencies_out = NULL; cudaGraphEdgeData const *__pyx_v_cyedgeData_out; PyObject *__pyx_v_pyedgeData_out = NULL; size_t __pyx_v_numDependencies_out; cudaError_t __pyx_v_err; size_t __pyx_10genexpr192__pyx_v_idx; size_t __pyx_10genexpr193__pyx_v_idx; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; size_t __pyx_t_11; PyObject *__pyx_t_12 = NULL; size_t __pyx_t_13; PyObject *__pyx_t_14 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamGetCaptureInfo_v3", 0); /* "cuda/bindings/runtime.pyx":17570 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17571 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17570 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ 
goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17572 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17573 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17572 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17575 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaStreamCaptureStatus captureStatus_out */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17575, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); 
} __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":17576 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaStreamCaptureStatus captureStatus_out * cdef unsigned long long id_out = 0 */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17576, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17578 * cystream = pstream * cdef cyruntime.cudaStreamCaptureStatus captureStatus_out * cdef unsigned long long id_out = 0 # <<<<<<<<<<<<<< * cdef cudaGraph_t graph_out = cudaGraph_t() * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL */ __pyx_v_id_out = 0; /* "cuda/bindings/runtime.pyx":17579 * cdef cyruntime.cudaStreamCaptureStatus captureStatus_out * cdef unsigned long long id_out = 0 * cdef cudaGraph_t graph_out = cudaGraph_t() # <<<<<<<<<<<<<< * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL * pydependencies_out = [] */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17579, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_graph_out = ((struct 
__pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17580 * cdef unsigned long long id_out = 0 * cdef cudaGraph_t graph_out = cudaGraph_t() * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL # <<<<<<<<<<<<<< * pydependencies_out = [] * cdef const cyruntime.cudaGraphEdgeData* cyedgeData_out = NULL */ __pyx_v_cydependencies_out = NULL; /* "cuda/bindings/runtime.pyx":17581 * cdef cudaGraph_t graph_out = cudaGraph_t() * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL * pydependencies_out = [] # <<<<<<<<<<<<<< * cdef const cyruntime.cudaGraphEdgeData* cyedgeData_out = NULL * pyedgeData_out = [] */ __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17581, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pydependencies_out = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17582 * cdef const cyruntime.cudaGraphNode_t* cydependencies_out = NULL * pydependencies_out = [] * cdef const cyruntime.cudaGraphEdgeData* cyedgeData_out = NULL # <<<<<<<<<<<<<< * pyedgeData_out = [] * cdef size_t numDependencies_out = 0 */ __pyx_v_cyedgeData_out = NULL; /* "cuda/bindings/runtime.pyx":17583 * pydependencies_out = [] * cdef const cyruntime.cudaGraphEdgeData* cyedgeData_out = NULL * pyedgeData_out = [] # <<<<<<<<<<<<<< * cdef size_t numDependencies_out = 0 * with nogil: */ __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pyedgeData_out = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17584 * cdef const cyruntime.cudaGraphEdgeData* cyedgeData_out = NULL * pyedgeData_out = [] * cdef size_t numDependencies_out = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamGetCaptureInfo_v3(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &cyedgeData_out, &numDependencies_out) */ __pyx_v_numDependencies_out = 0; /* 
"cuda/bindings/runtime.pyx":17585 * pyedgeData_out = [] * cdef size_t numDependencies_out = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetCaptureInfo_v3(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &cyedgeData_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17586 * cdef size_t numDependencies_out = 0 * with nogil: * err = cyruntime.cudaStreamGetCaptureInfo_v3(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &cyedgeData_out, &numDependencies_out) # <<<<<<<<<<<<<< * if cudaError_t(err) == cudaError_t(0): * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamGetCaptureInfo_v3(__pyx_v_cystream, (&__pyx_v_captureStatus_out), (&__pyx_v_id_out), ((cudaGraph_t *)__pyx_v_graph_out->__pyx_base._pvt_ptr), (&__pyx_v_cydependencies_out), (&__pyx_v_cyedgeData_out), (&__pyx_v_numDependencies_out)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17586, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":17585 * pyedgeData_out = [] * cdef size_t numDependencies_out = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamGetCaptureInfo_v3(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &cyedgeData_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":17587 * with nogil: * err = cyruntime.cudaStreamGetCaptureInfo_v3(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, 
&cydependencies_out, &cyedgeData_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): # <<<<<<<<<<<<<< * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if cudaError_t(err) == cudaError_t(0): */ __pyx_t_4 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); assert(__pyx_t_4); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_3, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_9}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __pyx_t_9 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_4); assert(__pyx_t_9); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_4, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_9, __pyx_mstate_global->__pyx_int_0}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, 
__pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __pyx_t_4 = PyObject_RichCompare(__pyx_t_5, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17587, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 17587, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17588 * err = cyruntime.cudaStreamGetCaptureInfo_v3(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &cyedgeData_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] # <<<<<<<<<<<<<< * if cudaError_t(err) == cudaError_t(0): * pyedgeData_out = [cudaGraphEdgeData(_ptr=&cyedgeData_out[idx]) for idx in range(numDependencies_out)] */ { /* enter inner scope */ __pyx_t_4 = PyList_New(0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17588, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __pyx_v_numDependencies_out; __pyx_t_10 = __pyx_t_6; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_10genexpr192__pyx_v_idx = __pyx_t_11; __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_9 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_12 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(__pyx_v_cydependencies_out[__pyx_10genexpr192__pyx_v_idx]))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17588, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_13 = 1; { PyObject 
*__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_5, NULL}; __pyx_t_14 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17588, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_init_value, __pyx_t_12, __pyx_t_14, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 17588, __pyx_L1_error) __pyx_t_3 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_9, __pyx_callargs+__pyx_t_13, (1-__pyx_t_13) | (__pyx_t_13*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_14); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17588, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } if (unlikely(__Pyx_ListComp_Append(__pyx_t_4, (PyObject*)__pyx_t_3))) __PYX_ERR(0, 17588, __pyx_L1_error) __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; } } /* exit inner scope */ __Pyx_DECREF_SET(__pyx_v_pydependencies_out, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":17587 * with nogil: * err = cyruntime.cudaStreamGetCaptureInfo_v3(cystream, &captureStatus_out, &id_out, graph_out._pvt_ptr, &cydependencies_out, &cyedgeData_out, &numDependencies_out) * if cudaError_t(err) == cudaError_t(0): # <<<<<<<<<<<<<< * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if cudaError_t(err) == cudaError_t(0): */ } /* "cuda/bindings/runtime.pyx":17589 * if cudaError_t(err) == cudaError_t(0): * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if cudaError_t(err) == cudaError_t(0): # <<<<<<<<<<<<<< * pyedgeData_out = [cudaGraphEdgeData(_ptr=&cyedgeData_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: */ __pyx_t_3 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_9, 
__pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17589, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_14 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17589, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_9))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_9); assert(__pyx_t_3); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_9); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_9, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_14}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17589, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __pyx_t_14 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17589, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_3); assert(__pyx_t_14); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_14); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_3, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_14, __pyx_mstate_global->__pyx_int_0}; __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17589, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); } __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, __pyx_t_9, 
Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17589, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 17589, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17590 * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if cudaError_t(err) == cudaError_t(0): * pyedgeData_out = [cudaGraphEdgeData(_ptr=&cyedgeData_out[idx]) for idx in range(numDependencies_out)] # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None, None, None, None, None) */ { /* enter inner scope */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17590, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __pyx_v_numDependencies_out; __pyx_t_10 = __pyx_t_6; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) { __pyx_10genexpr193__pyx_v_idx = __pyx_t_11; __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphEdgeData); __pyx_t_14 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphEdgeData); __pyx_t_12 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(((__pyx_t_4cuda_8bindings_7runtime_void_ptr)(&(__pyx_v_cyedgeData_out[__pyx_10genexpr193__pyx_v_idx])))); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17590, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_13 = 1; { PyObject *__pyx_callargs[2 + ((CYTHON_VECTORCALL) ? 
1 : 0)] = {__pyx_t_4, NULL}; __pyx_t_5 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17590, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_ptr_2, __pyx_t_12, __pyx_t_5, __pyx_callargs+1, 0) < (0)) __PYX_ERR(0, 17590, __pyx_L1_error) __pyx_t_9 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_14, __pyx_callargs+__pyx_t_13, (1-__pyx_t_13) | (__pyx_t_13*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_5); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17590, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_9); } if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_9))) __PYX_ERR(0, 17590, __pyx_L1_error) __Pyx_DECREF((PyObject *)__pyx_t_9); __pyx_t_9 = 0; } } /* exit inner scope */ __Pyx_DECREF_SET(__pyx_v_pyedgeData_out, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17589 * if cudaError_t(err) == cudaError_t(0): * pydependencies_out = [cudaGraphNode_t(init_value=cydependencies_out[idx]) for idx in range(numDependencies_out)] * if cudaError_t(err) == cudaError_t(0): # <<<<<<<<<<<<<< * pyedgeData_out = [cudaGraphEdgeData(_ptr=&cyedgeData_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":17591 * if cudaError_t(err) == cudaError_t(0): * pyedgeData_out = [cudaGraphEdgeData(_ptr=&cyedgeData_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None, None, None, None, None) * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, pyedgeData_out, numDependencies_out) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17592 * pyedgeData_out = 
[cudaGraphEdgeData(_ptr=&cyedgeData_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None, None, None, None, None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, pyedgeData_out, numDependencies_out) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17592, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17592, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_14 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_9); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17592, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(7); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17592, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_14); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_14) != (0)) __PYX_ERR(0, 17592, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None) != (0)) __PYX_ERR(0, 17592, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 2, Py_None) != (0)) __PYX_ERR(0, 17592, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 3, Py_None) != (0)) __PYX_ERR(0, 17592, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 4, Py_None) != (0)) __PYX_ERR(0, 17592, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 5, Py_None) != (0)) __PYX_ERR(0, 17592, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 6, Py_None) != (0)) 
__PYX_ERR(0, 17592, __pyx_L1_error); __pyx_t_14 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17591 * if cudaError_t(err) == cudaError_t(0): * pyedgeData_out = [cudaGraphEdgeData(_ptr=&cyedgeData_out[idx]) for idx in range(numDependencies_out)] * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None, None, None, None, None) * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, pyedgeData_out, numDependencies_out) */ } /* "cuda/bindings/runtime.pyx":17593 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None, None, None, None, None) * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, pyedgeData_out, numDependencies_out) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_14 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_9, __pyx_t_14); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; __pyx_t_9 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_cudaStreamCaptureStatus); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_12 = __Pyx_PyLong_From_enum__cudaStreamCaptureStatus(__pyx_v_captureStatus_out); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_5))) { __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_5); 
assert(__pyx_t_9); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_9); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_5, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_9, __pyx_t_12}; __pyx_t_14 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 17593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_14); } __pyx_t_5 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_id_out); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_12 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies_out); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 17593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_12); __pyx_t_9 = PyTuple_New(7); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 17593, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_14); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_14) != (0)) __PYX_ERR(0, 17593, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_t_5) != (0)) __PYX_ERR(0, 17593, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_graph_out); __Pyx_GIVEREF((PyObject *)__pyx_v_graph_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 3, ((PyObject *)__pyx_v_graph_out)) != (0)) __PYX_ERR(0, 17593, __pyx_L1_error); __Pyx_INCREF(__pyx_v_pydependencies_out); __Pyx_GIVEREF(__pyx_v_pydependencies_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 4, __pyx_v_pydependencies_out) != (0)) __PYX_ERR(0, 17593, __pyx_L1_error); __Pyx_INCREF(__pyx_v_pyedgeData_out); __Pyx_GIVEREF(__pyx_v_pyedgeData_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 5, __pyx_v_pyedgeData_out) != (0)) __PYX_ERR(0, 17593, 
__pyx_L1_error); __Pyx_GIVEREF(__pyx_t_12); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 6, __pyx_t_12) != (0)) __PYX_ERR(0, 17593, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_14 = 0; __pyx_t_5 = 0; __pyx_t_12 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17498 * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, numDependencies_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamGetCaptureInfo_v3(stream): * """ Query a stream's capture state (12.3+) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_12); __Pyx_XDECREF(__pyx_t_14); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamGetCaptureInfo_v3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_graph_out); __Pyx_XDECREF(__pyx_v_pydependencies_out); __Pyx_XDECREF(__pyx_v_pyedgeData_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17595 * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, pyedgeData_out, numDependencies_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamUpdateCaptureDependencies(stream, dependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, unsigned int flags): * """ Update the set of dependencies in a capturing stream (11.3+) */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_125cudaStreamUpdateCaptureDependencies(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ 
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_124cudaStreamUpdateCaptureDependencies, "cudaStreamUpdateCaptureDependencies(stream, dependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, unsigned int flags)\n\nUpdate the set of dependencies in a capturing stream (11.3+)\n\nModifies the dependency set of a capturing stream. The dependency set\nis the set of nodes that the next captured node in the stream will\ndepend on.\n\nValid flags are :py:obj:`~.cudaStreamAddCaptureDependencies` and\n:py:obj:`~.cudaStreamSetCaptureDependencies`. These control whether the\nset passed to the API is added to the existing set or replaces it. A\nflags value of 0 defaults to\n:py:obj:`~.cudaStreamAddCaptureDependencies`.\n\nNodes that are removed from the dependency set via this API do not\nresult in :py:obj:`~.cudaErrorStreamCaptureUnjoined` if they are\nunreachable from the stream at :py:obj:`~.cudaStreamEndCapture`.\n\nReturns :py:obj:`~.cudaErrorIllegalState` if the stream is not\ncapturing.\n\nThis API is new in CUDA 11.3. 
Developers requiring compatibility across\nminor versions of the CUDA driver to 11.0 should not use this API or\nprovide a fallback.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n The stream to update\ndependencies : list[:py:obj:`~.cudaGraphNode_t`]\n The set of dependencies to add\nnumDependencies : size_t\n The size of the dependencies array\nflags : unsigned int\n See above\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorIllegalState`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamGetCaptureInfo`,"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_125cudaStreamUpdateCaptureDependencies = {"cudaStreamUpdateCaptureDependencies", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_125cudaStreamUpdateCaptureDependencies, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_124cudaStreamUpdateCaptureDependencies}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_125cudaStreamUpdateCaptureDependencies(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; PyObject *__pyx_v_dependencies = 0; size_t __pyx_v_numDependencies; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaStreamUpdateCaptureDependencies (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif 
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,&__pyx_mstate_global->__pyx_n_u_dependencies,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17595, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 17595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamUpdateCaptureDependencies", 0) < (0)) __PYX_ERR(0, 17595, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamUpdateCaptureDependencies", 1, 4, 4, i); __PYX_ERR(0, 17595, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17595, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) 
__PYX_ERR(0, 17595, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17595, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 17595, __pyx_L3_error) } __pyx_v_stream = values[0]; __pyx_v_dependencies = values[1]; __pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 17596, __pyx_L3_error) __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[3]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17596, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaStreamUpdateCaptureDependencies", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 17595, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_124cudaStreamUpdateCaptureDependencies(__pyx_self, __pyx_v_stream, __pyx_v_dependencies, __pyx_v_numDependencies, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_35cudaStreamUpdateCaptureDependencies_2generator80(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":17641 * """ * dependencies = [] if 
dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaStream_t cystream */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_35cudaStreamUpdateCaptureDependencies_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_80_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_80_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_80_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_80_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_80_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 17641, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_35cudaStreamUpdateCaptureDependencies_2generator80, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[80]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaStreamUpdateCaptureDependenc, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 17641, __pyx_L1_error) 
__Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_35cudaStreamUpdateCaptureDependencies_2generator80(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_80_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_80_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 17641, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 17641, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = 
(CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17641, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17641, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17641, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17641, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 17641, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L7_bool_binop_done; } __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_5 = __pyx_t_6; __pyx_L7_bool_binop_done:; __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } 
} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17595 * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, pyedgeData_out, numDependencies_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaStreamUpdateCaptureDependencies(stream, dependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, unsigned int flags): * """ Update the set of dependencies in a capturing stream (11.3+) */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_124cudaStreamUpdateCaptureDependencies(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_dependencies, size_t __pyx_v_numDependencies, unsigned int __pyx_v_flags) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaGraphNode_t *__pyx_v_cydependencies; Py_ssize_t __pyx_v_idx; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_35cudaStreamUpdateCaptureDependencies_2generator80 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = 
NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; cudaError_t __pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamUpdateCaptureDependencies", 0); __Pyx_INCREF(__pyx_v_dependencies); /* "cuda/bindings/runtime.pyx":17640 * :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamGetCaptureInfo`, * """ * dependencies = [] if dependencies is None else dependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_2 = (__pyx_v_dependencies == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17640, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_dependencies); __pyx_t_1 = __pyx_v_dependencies; } __Pyx_DECREF_SET(__pyx_v_dependencies, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":17641 * """ * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaStream_t cystream */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_35cudaStreamUpdateCaptureDependencies_genexpr(NULL, __pyx_v_dependencies); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17641, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":17642 * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaStream_t cystream * if stream is None: */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_dependencies_is_not_ins}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17642, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 17642, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17641 * """ * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaStream_t cystream */ } /* "cuda/bindings/runtime.pyx":17644 * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaStream_t cystream * if 
stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_4 = (__pyx_v_stream == Py_None); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":17645 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17644 * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":17646 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_4 = __pyx_t_2; __pyx_L5_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":17647 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17647, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17646 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) 
* else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":17649 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17649, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_1 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17649, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":17650 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17650, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17651 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL # <<<<<<<<<<<<<< * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cydependencies = NULL; /* "cuda/bindings/runtime.pyx":17652 * cystream = pstream * cdef 
cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: # <<<<<<<<<<<<<< * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17652, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":17653 * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cydependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17653, __pyx_L1_error) __pyx_v_cydependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":17654 * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_4 = (__pyx_v_cydependencies == NULL); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":17655 * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(dependencies)): */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_5 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17655, __pyx_L1_error) __pyx_t_9 = 
PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_9}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17655, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 17655, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17654 * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) 
* if cydependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":17657 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(dependencies)): # <<<<<<<<<<<<<< * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17657, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":17658 * else: * for idx in range(len(dependencies)): * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(dependencies) == 1: * cydependencies = (dependencies[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_dependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (__pyx_v_cydependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr[0])); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } /* "cuda/bindings/runtime.pyx":17652 * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: # <<<<<<<<<<<<<< * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":17659 * for idx in range(len(dependencies)): * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: # <<<<<<<<<<<<<< * cydependencies = (dependencies[0])._pvt_ptr * if numDependencies > len(dependencies): raise RuntimeError("List 
is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17659, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 == 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":17660 * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: * cydependencies = (dependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) * with nogil: */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_dependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17660, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cydependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":17659 * for idx in range(len(dependencies)): * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: # <<<<<<<<<<<<<< * cydependencies = (dependencies[0])._pvt_ptr * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) */ } __pyx_L7:; /* "cuda/bindings/runtime.pyx":17661 * elif len(dependencies) == 1: * cydependencies = (dependencies[0])._pvt_ptr * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamUpdateCaptureDependencies(cystream, cydependencies, numDependencies, flags) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17661, __pyx_L1_error) __pyx_t_4 = (__pyx_v_numDependencies > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_4)) { __pyx_t_5 = NULL; 
__Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_9 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17661, __pyx_L1_error) __pyx_t_3 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_3, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 17661, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":17662 * cydependencies = (dependencies[0])._pvt_ptr * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamUpdateCaptureDependencies(cystream, cydependencies, numDependencies, flags) * if len(dependencies) > 1 and cydependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17663 * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) * with nogil: * err = cyruntime.cudaStreamUpdateCaptureDependencies(cystream, cydependencies, numDependencies, flags) # <<<<<<<<<<<<<< * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) */ __pyx_t_14 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamUpdateCaptureDependencies(__pyx_v_cystream, __pyx_v_cydependencies, __pyx_v_numDependencies, __pyx_v_flags); if (unlikely(__pyx_t_14 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17663, __pyx_L13_error) __pyx_v_err = __pyx_t_14; } /* "cuda/bindings/runtime.pyx":17662 * cydependencies = (dependencies[0])._pvt_ptr * if numDependencies > len(dependencies): raise RuntimeError("List is too small: " + str(len(dependencies)) + " < " + str(numDependencies)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamUpdateCaptureDependencies(cystream, cydependencies, numDependencies, flags) * if len(dependencies) > 1 and cydependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L14; } __pyx_L13_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L14:; } } /* "cuda/bindings/runtime.pyx":17664 * with nogil: * err = 
cyruntime.cudaStreamUpdateCaptureDependencies(cystream, cydependencies, numDependencies, flags) * if len(dependencies) > 1 and cydependencies is not NULL: # <<<<<<<<<<<<<< * free(cydependencies) * return (_dict_cudaError_t[err],) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17664, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L16_bool_binop_done; } __pyx_t_2 = (__pyx_v_cydependencies != NULL); __pyx_t_4 = __pyx_t_2; __pyx_L16_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":17665 * err = cyruntime.cudaStreamUpdateCaptureDependencies(cystream, cydependencies, numDependencies, flags) * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ free(__pyx_v_cydependencies); /* "cuda/bindings/runtime.pyx":17664 * with nogil: * err = cyruntime.cudaStreamUpdateCaptureDependencies(cystream, cydependencies, numDependencies, flags) * if len(dependencies) > 1 and cydependencies is not NULL: # <<<<<<<<<<<<<< * free(cydependencies) * return (_dict_cudaError_t[err],) */ } /* "cuda/bindings/runtime.pyx":17666 * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17666, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17666, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17666, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(1); 
/* NOTE(review): This file is Cython-generated C (source: cuda/bindings/runtime.pyx).
 * Do not hand-edit the logic; change the .pyx and regenerate. The comments added
 * below only annotate the generated structure for readability. */
/* Tail of cudaStreamUpdateCaptureDependencies(): wrap the looked-up error enum
 * into the 1-tuple (_dict_cudaError_t[err],) and jump to the common exit. */
if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17666, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GIVEREF(__pyx_t_3);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 17666, __pyx_L1_error);
__pyx_t_3 = 0;
__pyx_r = __pyx_t_9;
__pyx_t_9 = 0;
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":17595
 * return (_dict_cudaError_t[err], cudaStreamCaptureStatus(captureStatus_out), id_out, graph_out, pydependencies_out, pyedgeData_out, numDependencies_out)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaStreamUpdateCaptureDependencies(stream, dependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, unsigned int flags):
 *     """ Update the set of dependencies in a capturing stream (11.3+)
 */
/* function exit code */
/* Error path: release every temporary that may still be owned, record a
 * traceback frame, and fall through to the common cleanup at __pyx_L0. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_pstream);
__Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_35cudaStreamUpdateCaptureDependencies_2generator80);
__Pyx_XDECREF(__pyx_v_dependencies);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":17668
 * return (_dict_cudaError_t[err],)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaStreamUpdateCaptureDependencies_v2(stream, dependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], dependencyData : Optional[tuple[cudaGraphEdgeData] | list[cudaGraphEdgeData]], size_t numDependencies, unsigned int flags):
 *     """ Update the set of dependencies in a capturing stream (12.3+)
 */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_127cudaStreamUpdateCaptureDependencies_v2(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_126cudaStreamUpdateCaptureDependencies_v2, "cudaStreamUpdateCaptureDependencies_v2(stream, dependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], dependencyData: Optional[tuple[cudaGraphEdgeData] | list[cudaGraphEdgeData]], size_t numDependencies, unsigned int flags)\n\nUpdate the set of dependencies in a capturing stream (12.3+)\n\nModifies the dependency set of a capturing stream. The dependency set\nis the set of nodes that the next captured node in the stream will\ndepend on.\n\nValid flags are :py:obj:`~.cudaStreamAddCaptureDependencies` and\n:py:obj:`~.cudaStreamSetCaptureDependencies`. These control whether the\nset passed to the API is added to the existing set or replaces it. A\nflags value of 0 defaults to\n:py:obj:`~.cudaStreamAddCaptureDependencies`.\n\nNodes that are removed from the dependency set via this API do not\nresult in :py:obj:`~.cudaErrorStreamCaptureUnjoined` if they are\nunreachable from the stream at :py:obj:`~.cudaStreamEndCapture`.\n\nReturns :py:obj:`~.cudaErrorIllegalState` if the stream is not\ncapturing.\n\nParameters\n----------\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n The stream to update\ndependencies : list[:py:obj:`~.cudaGraphNode_t`]\n The set of dependencies to add\ndependencyData : list[:py:obj:`~.cudaGraphEdgeData`]\n Optional array of data associated with each dependency.\nnumDependencies : size_t\n The size of the dependencies array\nflags : unsigned int\n See above\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorIllegalState`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamGetCaptureInfo`,");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_127cudaStreamUpdateCaptureDependencies_v2 = {"cudaStreamUpdateCaptureDependencies_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_127cudaStreamUpdateCaptureDependencies_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_126cudaStreamUpdateCaptureDependencies_v2};
/* CPython entry point for cudaStreamUpdateCaptureDependencies_v2: unpacks the
 * five positional/keyword arguments, converts numDependencies (size_t) and
 * flags (unsigned int) from Python ints, then dispatches to the generated
 * implementation __pyx_pf_..._126cudaStreamUpdateCaptureDependencies_v2. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_127cudaStreamUpdateCaptureDependencies_v2(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
PyObject *__pyx_v_stream = 0;
PyObject *__pyx_v_dependencies = 0;
PyObject *__pyx_v_dependencyData = 0;
size_t __pyx_v_numDependencies;
unsigned int __pyx_v_flags;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[5] = {0,0,0,0,0};
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cudaStreamUpdateCaptureDependencies_v2 (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
{
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,&__pyx_mstate_global->__pyx_n_u_dependencies,&__pyx_mstate_global->__pyx_n_u_dependencyData,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_flags_2,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17668, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
/* Mixed positional+keyword call: collect positionals, then merge keywords. */
switch (__pyx_nargs) {
case  5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 17668, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case  4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 17668, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case  3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17668, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case  2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17668, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case  1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17668, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case  0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaStreamUpdateCaptureDependencies_v2", 0) < (0)) __PYX_ERR(0, 17668, __pyx_L3_error)
for (Py_ssize_t i = __pyx_nargs; i < 5; i++) {
if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaStreamUpdateCaptureDependencies_v2", 1, 5, 5, i); __PYX_ERR(0, 17668, __pyx_L3_error) }
}
} else if (unlikely(__pyx_nargs != 5)) {
goto __pyx_L5_argtuple_error;
} else {
/* Fast path: exactly five positional arguments, no keywords. */
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17668, __pyx_L3_error)
values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17668, __pyx_L3_error)
values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17668, __pyx_L3_error)
values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 17668, __pyx_L3_error)
values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 17668, __pyx_L3_error)
}
__pyx_v_stream = values[0];
__pyx_v_dependencies = values[1];
__pyx_v_dependencyData = values[2];
__pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 17669, __pyx_L3_error)
__pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[4]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17669, __pyx_L3_error)
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaStreamUpdateCaptureDependencies_v2", 1, 5, 5, __pyx_nargs); __PYX_ERR(0, 17668, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
/* Unpacking failed: drop any argument references already taken. */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_126cudaStreamUpdateCaptureDependencies_v2(__pyx_self, __pyx_v_stream, __pyx_v_dependencies, __pyx_v_dependencyData, __pyx_v_numDependencies, __pyx_v_flags);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_2generator81(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
/* 
"cuda/bindings/runtime.pyx":17712
 *     """
 *     dependencyData = [] if dependencyData is None else dependencyData
 *     if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData):             # <<<<<<<<<<<<<<
 *         raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]")
 *     dependencies = [] if dependencies is None else dependencies
 */
/* NOTE(review): Cython-generated code — regenerate from runtime.pyx rather than
 * hand-editing. Factory for the genexpr closure used by the dependencyData
 * isinstance check: allocates scope_struct_81, captures the iterable, and
 * returns a new generator running generator81 below. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) {
struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_81_genexpr *__pyx_cur_scope;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("genexpr", 0);
__pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_81_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_81_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_81_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL);
if (unlikely(!__pyx_cur_scope)) {
/* Allocation failed: park Py_None in the slot so the error path can DECREF it safely. */
__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_81_genexpr *)Py_None);
__Pyx_INCREF(Py_None);
__PYX_ERR(0, 17712, __pyx_L1_error)
} else {
__Pyx_GOTREF((PyObject *)__pyx_cur_scope);
}
__pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0;
__Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0);
{
__pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_2generator81, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[81]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaStreamUpdateCaptureDependenc_2, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime);
if (unlikely(!gen)) __PYX_ERR(0, 17712, __pyx_L1_error)
__Pyx_DECREF(__pyx_cur_scope);
__Pyx_RefNannyFinishContext();
return (PyObject *) gen;
}
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies_v2.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_DECREF((PyObject *)__pyx_cur_scope);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Generator body for the inlined all(isinstance(_x, cudaGraphEdgeData) ...)
 * check: iterates the captured iterable and returns Py_False on the first
 * element that fails __Pyx_TypeCheck against cudaGraphEdgeData, Py_True
 * if the iterable is exhausted without a mismatch. Runs to completion on
 * its first resume (no yields survive the inlining). */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_2generator81(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ {
struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_81_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_81_genexpr *)__pyx_generator->closure);
PyObject *__pyx_r = NULL;
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *(*__pyx_t_3)(PyObject *);
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("genexpr", 0);
switch (__pyx_generator->resume_label) {
case 0: goto __pyx_L3_first_run;
default: /* CPython raises the right error here */
__Pyx_RefNannyFinishContext();
return NULL;
}
__pyx_L3_first_run:;
if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 17712, __pyx_L1_error)
if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 17712, __pyx_L1_error) }
/* Choose the fast indexed path for exact list/tuple, generic iterator otherwise. */
if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) {
__pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1);
__pyx_t_2 = 0;
__pyx_t_3 = NULL;
} else {
__pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17712, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17712, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_3)) {
if (likely(PyList_CheckExact(__pyx_t_1))) {
{
Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17712, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break;
}
__pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2);
++__pyx_t_2;
} else {
{
Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17712, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break;
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2));
#else
__pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2);
#endif
++__pyx_t_2;
}
if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17712, __pyx_L1_error)
} else {
__pyx_t_4 = __pyx_t_3(__pyx_t_1);
if (unlikely(!__pyx_t_4)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 17712, __pyx_L1_error)
PyErr_Clear();
}
break;
}
}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x);
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_5 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphEdgeData);
__pyx_t_6 = (!__pyx_t_5);
if (__pyx_t_6) {
/* Short-circuit: first non-cudaGraphEdgeData element makes all(...) False. */
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L0;
}
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/*else*/ {
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True;
goto __pyx_L0;
}
CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
if (__Pyx_PyErr_Occurred()) {
__Pyx_Generator_Replace_StopIteration(0);
__Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
#if !CYTHON_USE_EXC_INFO_STACK
__Pyx_Coroutine_ResetAndClearException(__pyx_generator);
#endif
__pyx_generator->resume_label = -1;
__Pyx_Coroutine_clear((PyObject*)__pyx_generator);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_5generator82(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
/* "cuda/bindings/runtime.pyx":17715
 *         raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]")
 *     dependencies = [] if dependencies is None else dependencies
 *     if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies):             # <<<<<<<<<<<<<<
 *         raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]")
 *     cdef cyruntime.cudaStream_t cystream
 */
/* Factory for the second genexpr closure (dependencies isinstance check);
 * same shape as the factory above but binds scope_struct_82 / generator82. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_3genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) {
struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_82_genexpr *__pyx_cur_scope;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("genexpr", 0);
__pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_82_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_82_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_82_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL);
if (unlikely(!__pyx_cur_scope)) {
__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_82_genexpr *)Py_None);
__Pyx_INCREF(Py_None);
__PYX_ERR(0, 17715, __pyx_L1_error)
} else {
__Pyx_GOTREF((PyObject *)__pyx_cur_scope);
}
__pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0;
__Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0);
__Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0);
{
__pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_5generator82, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[82]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaStreamUpdateCaptureDependenc_2, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime);
if (unlikely(!gen)) __PYX_ERR(0, 17715, __pyx_L1_error)
__Pyx_DECREF(__pyx_cur_scope);
__Pyx_RefNannyFinishContext();
return (PyObject *) gen;
}
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies_v2.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_DECREF((PyObject *)__pyx_cur_scope);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Generator body for all(isinstance(_x, (cudaGraphNode_t, driver.CUgraphNode)) ...):
 * like generator81 but accepts either of the two node types (short-circuit OR
 * at __pyx_L7_bool_binop_done). Returns Py_False on first mismatch, else Py_True. */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_5generator82(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ {
struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_82_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_82_genexpr *)__pyx_generator->closure);
PyObject *__pyx_r = NULL;
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *(*__pyx_t_3)(PyObject *);
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("genexpr", 0);
switch (__pyx_generator->resume_label) {
case 0: goto __pyx_L3_first_run;
default: /* CPython raises the right error here */
__Pyx_RefNannyFinishContext();
return NULL;
}
__pyx_L3_first_run:;
if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 17715, __pyx_L1_error)
if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 17715, __pyx_L1_error) }
if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) {
__pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1);
__pyx_t_2 = 0;
__pyx_t_3 = NULL;
} else {
__pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17715, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17715, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_3)) {
if (likely(PyList_CheckExact(__pyx_t_1))) {
{
Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17715, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break;
}
__pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2);
++__pyx_t_2;
} else {
{
Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 17715, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break;
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2));
#else
__pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2);
#endif
++__pyx_t_2;
}
if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17715, __pyx_L1_error)
} else {
__pyx_t_4 = __pyx_t_3(__pyx_t_1);
if (unlikely(!__pyx_t_4)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 17715, __pyx_L1_error)
PyErr_Clear();
}
break;
}
}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x);
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
/* isinstance(_x, cudaGraphNode_t) or isinstance(_x, driver.CUgraphNode) */
__pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t);
if (!__pyx_t_6) {
} else {
__pyx_t_5 = __pyx_t_6;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode);
__pyx_t_5 = __pyx_t_6;
__pyx_L7_bool_binop_done:;
__pyx_t_6 = (!__pyx_t_5);
if (__pyx_t_6) {
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L0;
}
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/*else*/ {
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True;
goto __pyx_L0;
}
CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
if (__Pyx_PyErr_Occurred()) {
__Pyx_Generator_Replace_StopIteration(0);
__Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
#if !CYTHON_USE_EXC_INFO_STACK
__Pyx_Coroutine_ResetAndClearException(__pyx_generator);
#endif
__pyx_generator->resume_label = -1;
__Pyx_Coroutine_clear((PyObject*)__pyx_generator);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":17668
 * return (_dict_cudaError_t[err],)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaStreamUpdateCaptureDependencies_v2(stream, dependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], dependencyData : Optional[tuple[cudaGraphEdgeData] | list[cudaGraphEdgeData]], size_t numDependencies, unsigned int flags):
 * """ Update 
the set of dependencies in a capturing stream (12.3+) */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_126cudaStreamUpdateCaptureDependencies_v2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_dependencies, PyObject *__pyx_v_dependencyData, size_t __pyx_v_numDependencies, unsigned int __pyx_v_flags) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaGraphNode_t *__pyx_v_cydependencies; Py_ssize_t __pyx_v_idx; cudaGraphEdgeData *__pyx_v_cydependencyData; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_2generator81 = 0; PyObject *__pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_5generator82 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaGraphEdgeData_st *__pyx_t_14; cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaStreamUpdateCaptureDependencies_v2", 0); __Pyx_INCREF(__pyx_v_dependencies); __Pyx_INCREF(__pyx_v_dependencyData); /* "cuda/bindings/runtime.pyx":17711 * :py:obj:`~.cudaStreamBeginCapture`, :py:obj:`~.cudaStreamGetCaptureInfo`, * """ * dependencyData = [] if dependencyData is None else dependencyData # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") */ __pyx_t_2 = (__pyx_v_dependencyData == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 17711, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_dependencyData); __pyx_t_1 = __pyx_v_dependencyData; } __Pyx_DECREF_SET(__pyx_v_dependencyData, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":17712 * """ * dependencyData = [] if dependencyData is None else dependencyData * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_genexpr(NULL, __pyx_v_dependencyData); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17712, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17712, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 17712, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":17713 * dependencyData = [] if dependencyData is None else dependencyData * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") # <<<<<<<<<<<<<< * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, 
__pyx_mstate_global->__pyx_kp_u_Argument_dependencyData_is_not_i}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17713, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 17713, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17712 * """ * dependencyData = [] if dependencyData is None else dependencyData * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies */ } /* "cuda/bindings/runtime.pyx":17714 * if not all(isinstance(_x, (cudaGraphEdgeData,)) for _x in dependencyData): * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_4 = (__pyx_v_dependencies == Py_None); if (__pyx_t_4) { __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17714, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __pyx_t_5; __pyx_t_5 = 0; } else { __Pyx_INCREF(__pyx_v_dependencies); __pyx_t_3 = __pyx_v_dependencies; } __Pyx_DECREF_SET(__pyx_v_dependencies, __pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17715 * raise TypeError("Argument 'dependencyData' is not 
instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaStream_t cystream */ __pyx_t_3 = __pyx_pf_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_3genexpr(NULL, __pyx_v_dependencies); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_Generator_GetInlinedResult(__pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 17715, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_2 = (!__pyx_t_4); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":17716 * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaStream_t cystream * if stream is None: */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_1 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Argument_dependencies_is_not_ins}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17716, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 17716, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17715 * raise TypeError("Argument 'dependencyData' is not instance of type (expected tuple[cyruntime.cudaGraphEdgeData,] or list[cyruntime.cudaGraphEdgeData,]") * dependencies = [] if dependencies is None else dependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in dependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaStream_t cystream */ } /* "cuda/bindings/runtime.pyx":17718 * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_2 = (__pyx_v_stream == Py_None); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17719 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17718 * raise TypeError("Argument 'dependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L5; } /* "cuda/bindings/runtime.pyx":17720 * if stream is None: * pstream = 0 
* elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_4 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L6_bool_binop_done; } __pyx_t_4 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_2 = __pyx_t_4; __pyx_L6_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17721 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17721, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17720 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L5; } /* "cuda/bindings/runtime.pyx":17723 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL */ /*else*/ { __pyx_t_1 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_v_stream}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17723, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_3 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 17723, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L5:; /* "cuda/bindings/runtime.pyx":17724 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17724, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17725 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL # <<<<<<<<<<<<<< * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cydependencies = NULL; /* "cuda/bindings/runtime.pyx":17726 * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: # <<<<<<<<<<<<<< * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17726, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17727 * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cydependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17727, __pyx_L1_error) __pyx_v_cydependencies = ((cudaGraphNode_t 
*)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":17728 * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_2 = (__pyx_v_cydependencies == NULL); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":17729 * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(dependencies)): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_1 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17729, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = 
__Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17729, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 17729, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17728 * if len(dependencies) > 1: * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":17731 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(dependencies)): # <<<<<<<<<<<<<< * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17731, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":17732 * else: * for idx in range(len(dependencies)): * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] # 
<<<<<<<<<<<<<< * elif len(dependencies) == 1: * cydependencies = (dependencies[0])._pvt_ptr */ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_dependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); (__pyx_v_cydependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_3)->__pyx_base._pvt_ptr[0])); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } } /* "cuda/bindings/runtime.pyx":17726 * cystream = pstream * cdef cyruntime.cudaGraphNode_t* cydependencies = NULL * if len(dependencies) > 1: # <<<<<<<<<<<<<< * cydependencies = calloc(len(dependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cydependencies is NULL: */ goto __pyx_L8; } /* "cuda/bindings/runtime.pyx":17733 * for idx in range(len(dependencies)): * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: # <<<<<<<<<<<<<< * cydependencies = (dependencies[0])._pvt_ptr * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17733, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 == 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17734 * cydependencies[idx] = (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: * cydependencies = (dependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL * if len(dependencyData) > 1: */ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_dependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_cydependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_3)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17733 * for idx in range(len(dependencies)): * cydependencies[idx] 
= (dependencies[idx])._pvt_ptr[0] * elif len(dependencies) == 1: # <<<<<<<<<<<<<< * cydependencies = (dependencies[0])._pvt_ptr * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL */ } __pyx_L8:; /* "cuda/bindings/runtime.pyx":17735 * elif len(dependencies) == 1: * cydependencies = (dependencies[0])._pvt_ptr * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL # <<<<<<<<<<<<<< * if len(dependencyData) > 1: * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) */ __pyx_v_cydependencyData = NULL; /* "cuda/bindings/runtime.pyx":17736 * cydependencies = (dependencies[0])._pvt_ptr * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL * if len(dependencyData) > 1: # <<<<<<<<<<<<<< * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17736, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17737 * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL * if len(dependencyData) > 1: * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) # <<<<<<<<<<<<<< * if cydependencyData is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17737, __pyx_L1_error) __pyx_v_cydependencyData = ((cudaGraphEdgeData *)calloc(__pyx_t_8, (sizeof(cudaGraphEdgeData)))); /* "cuda/bindings/runtime.pyx":17738 * if len(dependencyData) > 1: * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + 
str(sizeof(cyruntime.cudaGraphEdgeData))) * for idx in range(len(dependencyData)): */ __pyx_t_2 = (__pyx_v_cydependencyData == NULL); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":17739 * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) # <<<<<<<<<<<<<< * for idx in range(len(dependencyData)): * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_9 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17739, __pyx_L1_error) __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17739, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17739, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17739, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 17739, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphEdgeData))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17739, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 17739, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = 
__Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17739, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_5}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17739, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 17739, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":17738 * if len(dependencyData) > 1: * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) * for idx in range(len(dependencyData)): */ } /* "cuda/bindings/runtime.pyx":17740 * if cydependencyData is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) * for idx in range(len(dependencyData)): # <<<<<<<<<<<<<< * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) * elif len(dependencyData) == 1: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17740, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":17741 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(dependencyData)) + 'x' + str(sizeof(cyruntime.cudaGraphEdgeData))) * for idx in 
range(len(dependencyData)): * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) # <<<<<<<<<<<<<< * elif len(dependencyData) == 1: * cydependencyData = (dependencyData[0])._pvt_ptr */ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_dependencyData, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); (void)(memcpy((&(__pyx_v_cydependencyData[__pyx_v_idx])), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphEdgeData *)__pyx_t_3)->__pyx_base._pvt_ptr, (sizeof(cudaGraphEdgeData)))); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } /* "cuda/bindings/runtime.pyx":17736 * cydependencies = (dependencies[0])._pvt_ptr * cdef cyruntime.cudaGraphEdgeData* cydependencyData = NULL * if len(dependencyData) > 1: # <<<<<<<<<<<<<< * cydependencyData = calloc(len(dependencyData), sizeof(cyruntime.cudaGraphEdgeData)) * if cydependencyData is NULL: */ goto __pyx_L12; } /* "cuda/bindings/runtime.pyx":17742 * for idx in range(len(dependencyData)): * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) * elif len(dependencyData) == 1: # <<<<<<<<<<<<<< * cydependencyData = (dependencyData[0])._pvt_ptr * with nogil: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17742, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 == 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17743 * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) * elif len(dependencyData) == 1: * cydependencyData = (dependencyData[0])._pvt_ptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaStreamUpdateCaptureDependencies_v2(cystream, cydependencies, cydependencyData, numDependencies, flags) */ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_dependencyData, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 17743, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_14 = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphEdgeData *)__pyx_t_3)->__pyx_base._pvt_ptr; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_cydependencyData = __pyx_t_14; /* "cuda/bindings/runtime.pyx":17742 * for idx in range(len(dependencyData)): * string.memcpy(&cydependencyData[idx], (dependencyData[idx])._pvt_ptr, sizeof(cyruntime.cudaGraphEdgeData)) * elif len(dependencyData) == 1: # <<<<<<<<<<<<<< * cydependencyData = (dependencyData[0])._pvt_ptr * with nogil: */ } __pyx_L12:; /* "cuda/bindings/runtime.pyx":17744 * elif len(dependencyData) == 1: * cydependencyData = (dependencyData[0])._pvt_ptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamUpdateCaptureDependencies_v2(cystream, cydependencies, cydependencyData, numDependencies, flags) * if len(dependencies) > 1 and cydependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17745 * cydependencyData = (dependencyData[0])._pvt_ptr * with nogil: * err = cyruntime.cudaStreamUpdateCaptureDependencies_v2(cystream, cydependencies, cydependencyData, numDependencies, flags) # <<<<<<<<<<<<<< * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaStreamUpdateCaptureDependencies_v2(__pyx_v_cystream, __pyx_v_cydependencies, __pyx_v_cydependencyData, __pyx_v_numDependencies, __pyx_v_flags); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17745, __pyx_L17_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":17744 * elif len(dependencyData) == 1: * cydependencyData = (dependencyData[0])._pvt_ptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaStreamUpdateCaptureDependencies_v2(cystream, cydependencies, cydependencyData, numDependencies, flags) * if len(dependencies) > 1 
and cydependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L18; } __pyx_L17_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L18:; } } /* "cuda/bindings/runtime.pyx":17746 * with nogil: * err = cyruntime.cudaStreamUpdateCaptureDependencies_v2(cystream, cydependencies, cydependencyData, numDependencies, flags) * if len(dependencies) > 1 and cydependencies is not NULL: # <<<<<<<<<<<<<< * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 17746, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L20_bool_binop_done; } __pyx_t_4 = (__pyx_v_cydependencies != NULL); __pyx_t_2 = __pyx_t_4; __pyx_L20_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17747 * err = cyruntime.cudaStreamUpdateCaptureDependencies_v2(cystream, cydependencies, cydependencyData, numDependencies, flags) * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) # <<<<<<<<<<<<<< * if len(dependencyData) > 1 and cydependencyData is not NULL: * free(cydependencyData) */ free(__pyx_v_cydependencies); /* "cuda/bindings/runtime.pyx":17746 * with nogil: * err = cyruntime.cudaStreamUpdateCaptureDependencies_v2(cystream, cydependencies, cydependencyData, numDependencies, flags) * if len(dependencies) > 1 and cydependencies is not NULL: # <<<<<<<<<<<<<< * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: */ } /* "cuda/bindings/runtime.pyx":17748 * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: # <<<<<<<<<<<<<< * free(cydependencyData) * return (_dict_cudaError_t[err],) */ __pyx_t_8 = PyObject_Length(__pyx_v_dependencyData); if (unlikely(__pyx_t_8 == 
((Py_ssize_t)-1))) __PYX_ERR(0, 17748, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L23_bool_binop_done; } __pyx_t_4 = (__pyx_v_cydependencyData != NULL); __pyx_t_2 = __pyx_t_4; __pyx_L23_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":17749 * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: * free(cydependencyData) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ free(__pyx_v_cydependencyData); /* "cuda/bindings/runtime.pyx":17748 * if len(dependencies) > 1 and cydependencies is not NULL: * free(cydependencies) * if len(dependencyData) > 1 and cydependencyData is not NULL: # <<<<<<<<<<<<<< * free(cydependencyData) * return (_dict_cudaError_t[err],) */ } /* "cuda/bindings/runtime.pyx":17750 * if len(dependencyData) > 1 and cydependencyData is not NULL: * free(cydependencyData) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 17750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 17750, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17668 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def 
cudaStreamUpdateCaptureDependencies_v2(stream, dependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], dependencyData : Optional[tuple[cudaGraphEdgeData] | list[cudaGraphEdgeData]], size_t numDependencies, unsigned int flags): * """ Update the set of dependencies in a capturing stream (12.3+) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaStreamUpdateCaptureDependencies_v2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_2generator81); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_38cudaStreamUpdateCaptureDependencies_v2_5generator82); __Pyx_XDECREF(__pyx_v_dependencies); __Pyx_XDECREF(__pyx_v_dependencyData); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17752 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventCreate(): * """ Creates an event object. 
*/ /* Python wrapper */
/* NOTE(review): this is machine-generated Cython C output for
   cuda/bindings/runtime.pyx. Do not hand-edit -- regenerate from the .pyx
   source instead; the comments below are reader orientation only. */
/* Forward declaration (proto) of the no-argument Python wrapper for
   runtime.cudaEventCreate(). */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_129cudaEventCreate(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/
/* Python-visible docstring string object bound to the method definition
   below (numpydoc-style text rendered by help()). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_128cudaEventCreate, "cudaEventCreate()\n\nCreates an event object.\n\nCreates an event object for the current device using\n:py:obj:`~.cudaEventDefault`.\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorLaunchFailure`, :py:obj:`~.cudaErrorMemoryAllocation`\nevent : :py:obj:`~.cudaEvent_t`\n    Newly created event\n\nSee Also\n--------\ncudaEventCreate (C++ API), :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cuEventCreate`");
/* Method-table entry exposing the wrapper to Python as "cudaEventCreate"
   with METH_NOARGS (the interpreter passes no argument tuple). */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_129cudaEventCreate = {"cudaEventCreate", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_129cudaEventCreate, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_128cudaEventCreate};
/* Wrapper body: performs refnanny bookkeeping, then delegates directly to
   the implementation function __pyx_pf_..._128cudaEventCreate (defined
   immediately below). Returns its result unchanged. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_129cudaEventCreate(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaEventCreate (wrapper)", 0);
/* NOTE(review): __pyx_args / __pyx_nargs are not parameters of this
   METH_NOARGS wrapper; __Pyx_KwValues_VARARGS presumably expands without
   evaluating its arguments in this build configuration -- confirm against
   the generated macro definition. */
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_128cudaEventCreate(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of cudaEventCreate(): allocates a cudaEvent_t holder,
   calls cyruntime.cudaEventCreate under nogil, and returns
   (_dict_cudaError_t[err], event-or-None). Body continues on the
   following lines (declaration list is split across the physical line
   boundary). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_128cudaEventCreate(CYTHON_UNUSED PyObject *__pyx_self) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaEvent_t *__pyx_v_event = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t
__pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaEventCreate", 0); /* "cuda/bindings/runtime.pyx":17770 * cudaEventCreate (C++ API), :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cuEventCreate` * """ * cdef cudaEvent_t event = cudaEvent_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaEventCreate(event._pvt_ptr) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17770, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_event = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEvent_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":17771 * """ * cdef cudaEvent_t event = cudaEvent_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventCreate(event._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17772 * cdef cudaEvent_t event = cudaEvent_t() * with nogil: * err = cyruntime.cudaEventCreate(event._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaEventCreate(((cudaEvent_t *)__pyx_v_event->__pyx_base._pvt_ptr)); if (unlikely(__pyx_t_5 == 
((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17772, __pyx_L4_error) __pyx_v_err = __pyx_t_5; } /* "cuda/bindings/runtime.pyx":17771 * """ * cdef cudaEvent_t event = cudaEvent_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventCreate(event._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":17773 * with nogil: * err = cyruntime.cudaEventCreate(event._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":17774 * err = cyruntime.cudaEventCreate(event._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], event) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 17774, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 17774, 
__pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17773 * with nogil: * err = cyruntime.cudaEventCreate(event._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event) */ } /* "cuda/bindings/runtime.pyx":17775 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17775, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17775, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17775, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17775, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 17775, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_event); __Pyx_GIVEREF((PyObject *)__pyx_v_event); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_event)) != (0)) __PYX_ERR(0, 17775, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17752 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventCreate(): * """ Creates an event object. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventCreate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_event); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17777 * return (_dict_cudaError_t[err], event) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventCreateWithFlags(unsigned int flags): * """ Creates an event object with the specified flags. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_131cudaEventCreateWithFlags(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_130cudaEventCreateWithFlags, "cudaEventCreateWithFlags(unsigned int flags)\n\nCreates an event object with the specified flags.\n\nCreates an event object for the current device with the specified\nflags. Valid flags include:\n\n- :py:obj:`~.cudaEventDefault`: Default event creation flag.\n\n- :py:obj:`~.cudaEventBlockingSync`: Specifies that event should use\n blocking synchronization. A host thread that uses\n :py:obj:`~.cudaEventSynchronize()` to wait on an event created with\n this flag will block until the event actually completes.\n\n- :py:obj:`~.cudaEventDisableTiming`: Specifies that the created event\n does not need to record timing data. 
Events created with this flag\n specified and the :py:obj:`~.cudaEventBlockingSync` flag not\n specified will provide the best performance when used with\n :py:obj:`~.cudaStreamWaitEvent()` and :py:obj:`~.cudaEventQuery()`.\n\n- :py:obj:`~.cudaEventInterprocess`: Specifies that the created event\n may be used as an interprocess event by\n :py:obj:`~.cudaIpcGetEventHandle()`.\n :py:obj:`~.cudaEventInterprocess` must be specified along with\n :py:obj:`~.cudaEventDisableTiming`.\n\nParameters\n----------\nflags : unsigned int\n Flags for new event\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorLaunchFailure`, :py:obj:`~.cudaErrorMemoryAllocation`\nevent : :py:obj:`~.cudaEvent_t`\n Newly created event\n\nSee Also\n--------\n:py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cuEventCreate`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_131cudaEventCreateWithFlags = {"cudaEventCreateWithFlags", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_131cudaEventCreateWithFlags, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_130cudaEventCreateWithFlags}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_131cudaEventCreateWithFlags(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaEventCreateWithFlags (wrapper)", 0); #if !CYTHON_METH_FASTCALL 
#if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17777, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17777, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaEventCreateWithFlags", 0) < (0)) __PYX_ERR(0, 17777, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaEventCreateWithFlags", 1, 1, 1, i); __PYX_ERR(0, 17777, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17777, __pyx_L3_error) } __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17778, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaEventCreateWithFlags", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17777, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
/* NOTE(review): Cython-GENERATED code for cudaEventCreateWithFlags(flags).
 * The wrapper above unpacks the single positional/keyword argument `flags`
 * (unsigned int); the __pyx_pf_ implementation below builds a cudaEvent_t
 * wrapper, calls cyruntime.cudaEventCreateWithFlags(event._pvt_ptr, flags)
 * with the GIL released, and returns (_dict_cudaError_t[err], event) or
 * (_dict_cudaError_t[err], None) on failure. Generated from runtime.pyx
 * lines ~17777-17824 — edit the .pyx, not this file. */
__Pyx_AddTraceback("cuda.bindings.runtime.cudaEventCreateWithFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_130cudaEventCreateWithFlags(__pyx_self, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_130cudaEventCreateWithFlags(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_flags) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaEvent_t *__pyx_v_event = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaEventCreateWithFlags", 0); /* "cuda/bindings/runtime.pyx":17819 * :py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cuEventCreate` * """ * cdef cudaEvent_t event = cudaEvent_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaEventCreateWithFlags(event._pvt_ptr, flags) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 17819, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_event = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEvent_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":17820 * """ * cdef cudaEvent_t event = cudaEvent_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventCreateWithFlags(event._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17821 * cdef cudaEvent_t event = cudaEvent_t() * with nogil: * err = cyruntime.cudaEventCreateWithFlags(event._pvt_ptr, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaEventCreateWithFlags(((cudaEvent_t *)__pyx_v_event->__pyx_base._pvt_ptr), __pyx_v_flags); if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17821, __pyx_L4_error) __pyx_v_err = __pyx_t_5; } /* "cuda/bindings/runtime.pyx":17820 * """ * cdef cudaEvent_t event = cudaEvent_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventCreateWithFlags(event._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":17822 * with nogil: * err = cyruntime.cudaEventCreateWithFlags(event._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":17823 * err = cyruntime.cudaEventCreateWithFlags(event._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], 
event) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 17823, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 17823, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17822 * with nogil: * err = cyruntime.cudaEventCreateWithFlags(event._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event) */ } /* "cuda/bindings/runtime.pyx":17824 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 17824, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_event); __Pyx_GIVEREF((PyObject *)__pyx_v_event); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_event)) != (0)) __PYX_ERR(0, 17824, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17777 * return (_dict_cudaError_t[err], event) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventCreateWithFlags(unsigned int flags): * """ Creates an event object with the specified flags. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventCreateWithFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_event); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17826 * return (_dict_cudaError_t[err], event) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventRecord(event, stream): * """ Records an event. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_133cudaEventRecord(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_132cudaEventRecord, "cudaEventRecord(event, stream)\n\nRecords an event.\n\nCaptures in `event` the contents of `stream` at the time of this call.\n`event` and `stream` must be on the same CUDA context. 
Calls such as\n:py:obj:`~.cudaEventQuery()` or :py:obj:`~.cudaStreamWaitEvent()` will\nthen examine or wait for completion of the work that was captured. Uses\nof `stream` after this call do not modify `event`. See note on default\nstream behavior for what is captured in the default case.\n\n:py:obj:`~.cudaEventRecord()` can be called multiple times on the same\nevent and will overwrite the previously captured state. Other APIs such\nas :py:obj:`~.cudaStreamWaitEvent()` use the most recently captured\nstate at the time of the API call, and are not affected by later calls\nto :py:obj:`~.cudaEventRecord()`. Before the first call to\n:py:obj:`~.cudaEventRecord()`, an event represents an empty set of\nwork, so for example :py:obj:`~.cudaEventQuery()` would return\n:py:obj:`~.cudaSuccess`.\n\nParameters\n----------\nevent : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Event to record\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream in which to record event\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorLaunchFailure`\n\nSee Also\n--------\n:py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cuEventRecord`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_133cudaEventRecord = {"cudaEventRecord", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_133cudaEventRecord, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_132cudaEventRecord}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_133cudaEventRecord(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else 
/* NOTE(review): Cython-GENERATED code for cudaEventRecord(event, stream).
 * The wrapper unpacks two required positional/keyword arguments; the
 * __pyx_pf_ implementation coerces each of `stream` and `event` to an
 * integer handle (None -> 0; cudaStream_t/CUstream or cudaEvent_t/CUevent
 * -> int(x); anything else -> int(cudaStream_t(x)) / int(cudaEvent_t(x))),
 * calls cyruntime.cudaEventRecord(cyevent, cystream) with the GIL released,
 * and returns the 1-tuple (_dict_cudaError_t[err],). Generated from
 * runtime.pyx lines ~17826-17880 — edit the .pyx, not this file. */
PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_event = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaEventRecord (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_event_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17826, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17826, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17826, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaEventRecord", 0) < (0)) __PYX_ERR(0, 17826, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaEventRecord", 1, 2, 2, i); __PYX_ERR(0, 17826, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if 
(!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17826, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17826, __pyx_L3_error) } __pyx_v_event = values[0]; __pyx_v_stream = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaEventRecord", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 17826, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventRecord", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_132cudaEventRecord(__pyx_self, __pyx_v_event, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_132cudaEventRecord(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_event, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaEventRecord", 0); /* "cuda/bindings/runtime.pyx":17863 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * 
elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17864 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17863 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17865 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17866 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17866, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17865 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17868 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaEvent_t cyevent */ /*else*/ { 
__pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17868, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17868, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":17869 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaEvent_t cyevent * if event is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17869, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17871 * cystream = pstream * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_event == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17872 * cdef cyruntime.cudaEvent_t cyevent * if event is None: * pevent = 0 # <<<<<<<<<<<<<< * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pevent = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17871 * cystream = pstream * cdef cyruntime.cudaEvent_t cyevent * if 
event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":17873 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17874 * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) # <<<<<<<<<<<<<< * else: * pevent = int(cudaEvent_t(event)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_event); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17874, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pevent = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17873 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":17876 * pevent = int(event) * else: * pevent = int(cudaEvent_t(event)) # <<<<<<<<<<<<<< * cyevent = pevent * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_event}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17876, __pyx_L1_error) 
__Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17876, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pevent = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":17877 * else: * pevent = int(cudaEvent_t(event)) * cyevent = pevent # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaEventRecord(cyevent, cystream) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17877, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17878 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventRecord(cyevent, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17879 * cyevent = pevent * with nogil: * err = cyruntime.cudaEventRecord(cyevent, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaEventRecord(__pyx_v_cyevent, __pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17879, __pyx_L10_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":17878 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventRecord(cyevent, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L11; } __pyx_L10_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L11:; } } /* "cuda/bindings/runtime.pyx":17880 * with nogil: * err = 
cyruntime.cudaEventRecord(cyevent, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 17880, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17826 * return (_dict_cudaError_t[err], event) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventRecord(event, stream): * """ Records an event. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventRecord", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_pevent); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17882 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventRecordWithFlags(event, stream, unsigned int flags): * """ Records an event. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_135cudaEventRecordWithFlags(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_134cudaEventRecordWithFlags, "cudaEventRecordWithFlags(event, stream, unsigned int flags)\n\nRecords an event.\n\nCaptures in `event` the contents of `stream` at the time of this call.\n`event` and `stream` must be on the same CUDA context. Calls such as\n:py:obj:`~.cudaEventQuery()` or :py:obj:`~.cudaStreamWaitEvent()` will\nthen examine or wait for completion of the work that was captured. Uses\nof `stream` after this call do not modify `event`. See note on default\nstream behavior for what is captured in the default case.\n\n:py:obj:`~.cudaEventRecordWithFlags()` can be called multiple times on\nthe same event and will overwrite the previously captured state. Other\nAPIs such as :py:obj:`~.cudaStreamWaitEvent()` use the most recently\ncaptured state at the time of the API call, and are not affected by\nlater calls to :py:obj:`~.cudaEventRecordWithFlags()`. 
Before the first\ncall to :py:obj:`~.cudaEventRecordWithFlags()`, an event represents an\nempty set of work, so for example :py:obj:`~.cudaEventQuery()` would\nreturn :py:obj:`~.cudaSuccess`.\n\nflags include:\n\n- :py:obj:`~.cudaEventRecordDefault`: Default event creation flag.\n\n- :py:obj:`~.cudaEventRecordExternal`: Event is captured in the graph\n as an external event node when performing stream capture.\n\nParameters\n----------\nevent : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Event to record\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream in which to record event\nflags : unsigned int\n Parameters for the operation(See above)\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorLaunchFailure`\n\nSee Also\n--------\n:py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cuEv""entRecord`,"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_135cudaEventRecordWithFlags = {"cudaEventRecordWithFlags", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_135cudaEventRecordWithFlags, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_134cudaEventRecordWithFlags}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_135cudaEventRecordWithFlags(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_event = 0; PyObject *__pyx_v_stream = 0; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = 
{0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaEventRecordWithFlags (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_event_2,&__pyx_mstate_global->__pyx_n_u_stream,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17882, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17882, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17882, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17882, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaEventRecordWithFlags", 0) < (0)) __PYX_ERR(0, 17882, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaEventRecordWithFlags", 1, 3, 3, i); __PYX_ERR(0, 17882, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && 
unlikely(!values[0])) __PYX_ERR(0, 17882, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 17882, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 17882, __pyx_L3_error) } __pyx_v_event = values[0]; __pyx_v_stream = values[1]; __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 17883, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaEventRecordWithFlags", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 17882, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventRecordWithFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_134cudaEventRecordWithFlags(__pyx_self, __pyx_v_event, __pyx_v_stream, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_134cudaEventRecordWithFlags(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_event, PyObject *__pyx_v_stream, unsigned int __pyx_v_flags) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t 
__pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaEventRecordWithFlags", 0); /* "cuda/bindings/runtime.pyx":17928 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17929 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17928 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17930 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17931 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17930 * 
if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17933 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaEvent_t cyevent */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17933, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17933, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":17934 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaEvent_t cyevent * if event is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17934, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17936 * cystream = pstream * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_event == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17937 * cdef 
cyruntime.cudaEvent_t cyevent * if event is None: * pevent = 0 # <<<<<<<<<<<<<< * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pevent = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17936 * cystream = pstream * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":17938 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17939 * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) # <<<<<<<<<<<<<< * else: * pevent = int(cudaEvent_t(event)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_event); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17939, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pevent = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":17938 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":17941 * pevent = int(event) * else: * pevent = int(cudaEvent_t(event)) # <<<<<<<<<<<<<< * cyevent = pevent * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; 
{ PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_event}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17941, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17941, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pevent = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":17942 * else: * pevent = int(cudaEvent_t(event)) * cyevent = pevent # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaEventRecordWithFlags(cyevent, cystream, flags) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 17942, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17943 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventRecordWithFlags(cyevent, cystream, flags) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17944 * cyevent = pevent * with nogil: * err = cyruntime.cudaEventRecordWithFlags(cyevent, cystream, flags) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaEventRecordWithFlags(__pyx_v_cyevent, __pyx_v_cystream, __pyx_v_flags); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17944, __pyx_L10_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":17943 * pevent = 
int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventRecordWithFlags(cyevent, cystream, flags) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L11; } __pyx_L10_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L11:; } } /* "cuda/bindings/runtime.pyx":17945 * with nogil: * err = cyruntime.cudaEventRecordWithFlags(cyevent, cystream, flags) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17945, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 17945, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17882 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventRecordWithFlags(event, stream, unsigned int flags): * """ Records an event. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventRecordWithFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_pevent); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17947 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventQuery(event): * """ Queries an event's status. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_137cudaEventQuery(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_136cudaEventQuery, "cudaEventQuery(event)\n\nQueries an event's status.\n\nQueries the status of all work currently captured by `event`. 
See\n:py:obj:`~.cudaEventRecord()` for details on what is captured by an\nevent.\n\nReturns :py:obj:`~.cudaSuccess` if all captured work has been\ncompleted, or :py:obj:`~.cudaErrorNotReady` if any captured work is\nincomplete.\n\nFor the purposes of Unified Memory, a return value of\n:py:obj:`~.cudaSuccess` is equivalent to having called\n:py:obj:`~.cudaEventSynchronize()`.\n\nParameters\n----------\nevent : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Event to query\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorNotReady`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorLaunchFailure`\n\nSee Also\n--------\n:py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cuEventQuery`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_137cudaEventQuery = {"cudaEventQuery", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_137cudaEventQuery, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_136cudaEventQuery}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_137cudaEventQuery(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_event = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaEventQuery (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if 
(unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_event_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17947, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17947, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaEventQuery", 0) < (0)) __PYX_ERR(0, 17947, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaEventQuery", 1, 1, 1, i); __PYX_ERR(0, 17947, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17947, __pyx_L3_error) } __pyx_v_event = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaEventQuery", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17947, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventQuery", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_136cudaEventQuery(__pyx_self, __pyx_v_event); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_136cudaEventQuery(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_event) { cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaEventQuery", 0); /* "cuda/bindings/runtime.pyx":17978 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_event == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17979 * cdef cyruntime.cudaEvent_t cyevent * if event is None: * pevent = 0 # <<<<<<<<<<<<<< * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pevent = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":17978 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17980 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); 
__pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":17981 * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) # <<<<<<<<<<<<<< * else: * pevent = int(cudaEvent_t(event)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_event); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pevent = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":17980 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":17983 * pevent = int(event) * else: * pevent = int(cudaEvent_t(event)) # <<<<<<<<<<<<<< * cyevent = pevent * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_event}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17983, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pevent = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":17984 * else: * pevent = int(cudaEvent_t(event)) * cyevent = pevent # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaEventQuery(cyevent) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) 
__PYX_ERR(0, 17984, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":17985 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventQuery(cyevent) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":17986 * cyevent = pevent * with nogil: * err = cyruntime.cudaEventQuery(cyevent) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaEventQuery(__pyx_v_cyevent); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 17986, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":17985 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventQuery(cyevent) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":17987 * with nogil: * err = cyruntime.cudaEventQuery(cyevent) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = 
PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17987, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 17987, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17947 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventQuery(event): * """ Queries an event's status. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventQuery", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pevent); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":17989 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventSynchronize(event): * """ Waits for an event to complete. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_139cudaEventSynchronize(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_138cudaEventSynchronize, "cudaEventSynchronize(event)\n\nWaits for an event to complete.\n\nWaits until the completion of all work currently captured in `event`.\nSee :py:obj:`~.cudaEventRecord()` for details on what is captured by an\nevent.\n\nWaiting for an event that was created with the\n:py:obj:`~.cudaEventBlockingSync` flag will cause the calling CPU\nthread to block until the event has been completed by the device. 
If\nthe :py:obj:`~.cudaEventBlockingSync` flag has not been set, then the\nCPU thread will busy-wait until the event has been completed by the\ndevice.\n\nParameters\n----------\nevent : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Event to wait for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorLaunchFailure`\n\nSee Also\n--------\n:py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cuEventSynchronize`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_139cudaEventSynchronize = {"cudaEventSynchronize", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_139cudaEventSynchronize, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_138cudaEventSynchronize}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_139cudaEventSynchronize(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_event = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaEventSynchronize (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_event_2,0}; const 
Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 17989, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17989, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaEventSynchronize", 0) < (0)) __PYX_ERR(0, 17989, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaEventSynchronize", 1, 1, 1, i); __PYX_ERR(0, 17989, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 17989, __pyx_L3_error) } __pyx_v_event = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaEventSynchronize", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 17989, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventSynchronize", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_138cudaEventSynchronize(__pyx_self, __pyx_v_event); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
/* NOTE(review): Cython-generated impl of runtime.cudaEventSynchronize(event) (runtime.pyx:17989-18028).
   Coerces `event` to an integer handle (None -> 0; cudaEvent_t or driver.CUevent -> int(event);
   anything else -> int(cudaEvent_t(event))), casts it to a cyruntime.cudaEvent_t pointer,
   releases the GIL around the cyruntime.cudaEventSynchronize call, and returns the 1-tuple
   (_dict_cudaError_t[err],). Generated code -- fix issues in the .pyx, not here. */
*__pyx_pf_4cuda_8bindings_7runtime_138cudaEventSynchronize(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_event) { cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaEventSynchronize", 0); /* "cuda/bindings/runtime.pyx":18019 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_event == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18020 * cdef cyruntime.cudaEvent_t cyevent * if event is None: * pevent = 0 # <<<<<<<<<<<<<< * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pevent = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18019 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18021 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18022 * pevent = 0 * elif isinstance(event, 
(cudaEvent_t,driver.CUevent)): * pevent = int(event) # <<<<<<<<<<<<<< * else: * pevent = int(cudaEvent_t(event)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_event); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18022, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pevent = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":18021 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18024 * pevent = int(event) * else: * pevent = int(cudaEvent_t(event)) # <<<<<<<<<<<<<< * cyevent = pevent * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_event}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18024, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18024, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pevent = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18025 * else: * pevent = int(cudaEvent_t(event)) * cyevent = pevent # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaEventSynchronize(cyevent) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18025, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* 
"cuda/bindings/runtime.pyx":18026 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventSynchronize(cyevent) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18027 * cyevent = pevent * with nogil: * err = cyruntime.cudaEventSynchronize(cyevent) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaEventSynchronize(__pyx_v_cyevent); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18027, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":18026 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventSynchronize(cyevent) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":18028 * with nogil: * err = cyruntime.cudaEventSynchronize(cyevent) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 18028, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":17989 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventSynchronize(event): * """ Waits for an event to complete. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventSynchronize", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pevent); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18030 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventDestroy(event): * """ Destroys an event object. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_141cudaEventDestroy(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_140cudaEventDestroy, "cudaEventDestroy(event)\n\nDestroys an event object.\n\nDestroys the event specified by `event`.\n\nAn event may be destroyed before it is complete (i.e., while\n:py:obj:`~.cudaEventQuery()` would return\n:py:obj:`~.cudaErrorNotReady`). 
In this case, the call does not block\non completion of the event, and any associated resources will\nautomatically be released asynchronously at completion.\n\nParameters\n----------\nevent : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Event to destroy\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorLaunchFailure`\n\nSee Also\n--------\n:py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cudaEventElapsedTime`, :py:obj:`~.cuEventDestroy`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_141cudaEventDestroy = {"cudaEventDestroy", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_141cudaEventDestroy, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_140cudaEventDestroy}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_141cudaEventDestroy(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_event = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaEventDestroy (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_event_2,0}; const Py_ssize_t 
__pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18030, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18030, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaEventDestroy", 0) < (0)) __PYX_ERR(0, 18030, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaEventDestroy", 1, 1, 1, i); __PYX_ERR(0, 18030, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18030, __pyx_L3_error) } __pyx_v_event = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaEventDestroy", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18030, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventDestroy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_140cudaEventDestroy(__pyx_self, __pyx_v_event); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
/* NOTE(review): Cython-generated impl of runtime.cudaEventDestroy(event) (runtime.pyx:18030-18066).
   Same handle-coercion pattern as cudaEventSynchronize above (None -> 0; cudaEvent_t or
   driver.CUevent -> int(event); else int(cudaEvent_t(event))), then calls
   cyruntime.cudaEventDestroy with the GIL released and returns (_dict_cudaError_t[err],).
   Generated code -- fix issues in the .pyx, not here. */
*__pyx_pf_4cuda_8bindings_7runtime_140cudaEventDestroy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_event) { cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaEventDestroy", 0); /* "cuda/bindings/runtime.pyx":18057 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_event == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18058 * cdef cyruntime.cudaEvent_t cyevent * if event is None: * pevent = 0 # <<<<<<<<<<<<<< * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pevent = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18057 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18059 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18060 * pevent = 0 * elif isinstance(event, 
(cudaEvent_t,driver.CUevent)): * pevent = int(event) # <<<<<<<<<<<<<< * else: * pevent = int(cudaEvent_t(event)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_event); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18060, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pevent = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":18059 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18062 * pevent = int(event) * else: * pevent = int(cudaEvent_t(event)) # <<<<<<<<<<<<<< * cyevent = pevent * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_event}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18062, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18062, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pevent = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18063 * else: * pevent = int(cudaEvent_t(event)) * cyevent = pevent # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaEventDestroy(cyevent) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18063, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* 
"cuda/bindings/runtime.pyx":18064 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventDestroy(cyevent) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18065 * cyevent = pevent * with nogil: * err = cyruntime.cudaEventDestroy(cyevent) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaEventDestroy(__pyx_v_cyevent); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18065, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":18064 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventDestroy(cyevent) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":18066 * with nogil: * err = cyruntime.cudaEventDestroy(cyevent) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 18066, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18030 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventDestroy(event): * """ Destroys an event object. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventDestroy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pevent); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18068 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventElapsedTime(start, end): * """ Computes the elapsed time between events. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_143cudaEventElapsedTime(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_142cudaEventElapsedTime, "cudaEventElapsedTime(start, end)\n\nComputes the elapsed time between events.\n\nComputes the elapsed time between two events (in milliseconds with a\nresolution of around 0.5 microseconds).\n\nIf either event was last recorded in a non-NULL stream, the resulting\ntime may be greater than expected (even if both used the same stream\nhandle). This happens because the :py:obj:`~.cudaEventRecord()`\noperation takes place asynchronously and there is no guarantee that the\nmeasured latency is actually just between the two events. 
Any number of\nother different stream operations could execute in between the two\nmeasured events, thus altering the timing in a significant way.\n\nIf :py:obj:`~.cudaEventRecord()` has not been called on either event,\nthen :py:obj:`~.cudaErrorInvalidResourceHandle` is returned. If\n:py:obj:`~.cudaEventRecord()` has been called on both events but one or\nboth of them has not yet been completed (that is,\n:py:obj:`~.cudaEventQuery()` would return :py:obj:`~.cudaErrorNotReady`\non at least one of the events), :py:obj:`~.cudaErrorNotReady` is\nreturned. If either event was created with the\n:py:obj:`~.cudaEventDisableTiming` flag, then this function will return\n:py:obj:`~.cudaErrorInvalidResourceHandle`.\n\nParameters\n----------\nstart : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Starting event\nend : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Ending event\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorNotReady`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorLaunchFailure`, :py:obj:`~.cudaErrorUnknown`\nms : float\n Time between `start` and `end` in ms\n\nSee Also\n--------\n:py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cuEventElapsedTime`");
/* NOTE(review): Cython-generated METH_FASTCALL wrapper for cudaEventElapsedTime(start, end).
   Parses exactly two positional-or-keyword arguments and dispatches to the impl function.
   Generated from cuda/bindings/runtime.pyx -- fix the .pyx/codegen, not this file. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_143cudaEventElapsedTime = {"cudaEventElapsedTime", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_143cudaEventElapsedTime, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_142cudaEventElapsedTime}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_143cudaEventElapsedTime(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds 
#endif ) { PyObject *__pyx_v_start = 0; PyObject *__pyx_v_end = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaEventElapsedTime (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_start,&__pyx_mstate_global->__pyx_n_u_end,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 18068, __pyx_L3_error) /* BUGFIX: was `unlikely(__pyx_kwds_len) < 0`; unlikely() yields 0/1 under __builtin_expect, so the negative-count error path was unreachable */ if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18068, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18068, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaEventElapsedTime", 0) < (0)) __PYX_ERR(0, 18068, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaEventElapsedTime", 1, 2, 2, i); __PYX_ERR(0, 18068, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) 
__PYX_ERR(0, 18068, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18068, __pyx_L3_error) } __pyx_v_start = values[0]; __pyx_v_end = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaEventElapsedTime", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 18068, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventElapsedTime", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_142cudaEventElapsedTime(__pyx_self, __pyx_v_start, __pyx_v_end); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): impl of runtime.cudaEventElapsedTime(start, end) (runtime.pyx:18068-18132).
   Coerces both event arguments to cudaEvent_t handles (same None/cudaEvent_t/CUevent/int
   pattern as above), calls cyruntime.cudaEventElapsedTime(&ms, ...) with the GIL released,
   and returns (_dict_cudaError_t[err], None) on failure or (_dict_cudaError_t[err], ms)
   on success. Generated code -- fix issues in the .pyx, not here. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_142cudaEventElapsedTime(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_start, PyObject *__pyx_v_end) { cudaEvent_t __pyx_v_cyend; PyObject *__pyx_v_pend = NULL; cudaEvent_t __pyx_v_cystart; PyObject *__pyx_v_pstart = NULL; float __pyx_v_ms; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaEventElapsedTime", 0); /* "cuda/bindings/runtime.pyx":18112 * """ * cdef cyruntime.cudaEvent_t cyend * if end is None: # <<<<<<<<<<<<<< * pend = 0 * elif isinstance(end, 
(cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_end == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18113 * cdef cyruntime.cudaEvent_t cyend * if end is None: * pend = 0 # <<<<<<<<<<<<<< * elif isinstance(end, (cudaEvent_t,driver.CUevent)): * pend = int(end) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pend = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18112 * """ * cdef cyruntime.cudaEvent_t cyend * if end is None: # <<<<<<<<<<<<<< * pend = 0 * elif isinstance(end, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18114 * if end is None: * pend = 0 * elif isinstance(end, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pend = int(end) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_end, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_end, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18115 * pend = 0 * elif isinstance(end, (cudaEvent_t,driver.CUevent)): * pend = int(end) # <<<<<<<<<<<<<< * else: * pend = int(cudaEvent_t(end)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_end); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18115, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pend = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":18114 * if end is None: * pend = 0 * elif isinstance(end, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pend = int(end) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18117 * pend = int(end) * else: * pend = int(cudaEvent_t(end)) # <<<<<<<<<<<<<< * cyend = pend * cdef cyruntime.cudaEvent_t cystart */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_5 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_end}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18117, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18117, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pend = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18118 * else: * pend = int(cudaEvent_t(end)) * cyend = pend # <<<<<<<<<<<<<< * cdef cyruntime.cudaEvent_t cystart * if start is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pend); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18118, __pyx_L1_error) __pyx_v_cyend = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":18120 * cyend = pend * cdef cyruntime.cudaEvent_t cystart * if start is None: # <<<<<<<<<<<<<< * pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_start == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18121 * cdef cyruntime.cudaEvent_t cystart * if start is None: * pstart = 0 # <<<<<<<<<<<<<< * elif isinstance(start, (cudaEvent_t,driver.CUevent)): * pstart = int(start) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstart = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18120 * cyend = pend * cdef cyruntime.cudaEvent_t cystart * if start is None: # <<<<<<<<<<<<<< * pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":18122 * if start is None: * 
pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pstart = int(start) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_start, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_start, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18123 * pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): * pstart = int(start) # <<<<<<<<<<<<<< * else: * pstart = int(cudaEvent_t(start)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_start); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18123, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pstart = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":18122 * if start is None: * pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pstart = int(start) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":18125 * pstart = int(start) * else: * pstart = int(cudaEvent_t(start)) # <<<<<<<<<<<<<< * cystart = pstart * cdef float ms = 0 */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_start}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18125, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
__Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pstart = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":18126 * else: * pstart = int(cudaEvent_t(start)) * cystart = pstart # <<<<<<<<<<<<<< * cdef float ms = 0 * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstart); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18126, __pyx_L1_error) __pyx_v_cystart = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":18127 * pstart = int(cudaEvent_t(start)) * cystart = pstart * cdef float ms = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaEventElapsedTime(&ms, cystart, cyend) */ __pyx_v_ms = 0.0; /* "cuda/bindings/runtime.pyx":18128 * cystart = pstart * cdef float ms = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventElapsedTime(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18129 * cdef float ms = 0 * with nogil: * err = cyruntime.cudaEventElapsedTime(&ms, cystart, cyend) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaEventElapsedTime((&__pyx_v_ms), __pyx_v_cystart, __pyx_v_cyend); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18129, __pyx_L10_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":18128 * cystart = pstart * cdef float ms = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventElapsedTime(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L11; } __pyx_L10_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L11:; } } /* 
"cuda/bindings/runtime.pyx":18130 * with nogil: * err = cyruntime.cudaEventElapsedTime(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ms) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18131 * err = cyruntime.cudaEventElapsedTime(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], ms) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 18131, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 18131, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18130 * with nogil: * err = cyruntime.cudaEventElapsedTime(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ms) */ } /* "cuda/bindings/runtime.pyx":18132 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ms) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ 
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyFloat_FromDouble(__pyx_v_ms); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18132, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 18132, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3) != (0)) __PYX_ERR(0, 18132, __pyx_L1_error); __pyx_t_4 = 0; __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18068 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventElapsedTime(start, end): * """ Computes the elapsed time between events. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventElapsedTime", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pend); __Pyx_XDECREF(__pyx_v_pstart); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18134 * return (_dict_cudaError_t[err], ms) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventElapsedTime_v2(start, end): * """ Computes the elapsed time between events. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_145cudaEventElapsedTime_v2(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_144cudaEventElapsedTime_v2, "cudaEventElapsedTime_v2(start, end)\n\nComputes the elapsed time between events.\n\nComputes the elapsed time between two events (in milliseconds with a\nresolution of around 0.5 microseconds). Note this API is not guaranteed\nto return the latest errors for pending work. As such this API is\nintended to serve as a elapsed time calculation only and polling for\ncompletion on the events to be compared should be done with\n:py:obj:`~.cudaEventQuery` instead.\n\nIf either event was last recorded in a non-NULL stream, the resulting\ntime may be greater than expected (even if both used the same stream\nhandle). This happens because the :py:obj:`~.cudaEventRecord()`\noperation takes place asynchronously and there is no guarantee that the\nmeasured latency is actually just between the two events. Any number of\nother different stream operations could execute in between the two\nmeasured events, thus altering the timing in a significant way.\n\nIf :py:obj:`~.cudaEventRecord()` has not been called on either event,\nthen :py:obj:`~.cudaErrorInvalidResourceHandle` is returned. If\n:py:obj:`~.cudaEventRecord()` has been called on both events but one or\nboth of them has not yet been completed (that is,\n:py:obj:`~.cudaEventQuery()` would return :py:obj:`~.cudaErrorNotReady`\non at least one of the events), :py:obj:`~.cudaErrorNotReady` is\nreturned. 
If either event was created with the\n:py:obj:`~.cudaEventDisableTiming` flag, then this function will return\n:py:obj:`~.cudaErrorInvalidResourceHandle`.\n\nParameters\n----------\nstart : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n    Starting event\nend : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n    Ending event\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorNotReady`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorLaunchFailure`, :py:obj:`~.cudaErrorUnknown`\nms : float\n    Time between `start` and `end` in ms\n\nSee Also\n------""--\n:py:obj:`~.cudaEventCreate (C API)`, :py:obj:`~.cudaEventCreateWithFlags`, :py:obj:`~.cudaEventQuery`, :py:obj:`~.cudaEventSynchronize`, :py:obj:`~.cudaEventDestroy`, :py:obj:`~.cudaEventRecord`, :py:obj:`~.cuEventElapsedTime`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_145cudaEventElapsedTime_v2 = {"cudaEventElapsedTime_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_145cudaEventElapsedTime_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_144cudaEventElapsedTime_v2}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_145cudaEventElapsedTime_v2(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_start = 0; PyObject *__pyx_v_end = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaEventElapsedTime_v2 (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = 
/* Generated argument unpacking for the wrapper: count positional args, then fold in any keyword args via __Pyx_ParseKeywords; exactly two values ('start', 'end') are required, otherwise __Pyx_RaiseArgtupleInvalid raises. */
PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_start,&__pyx_mstate_global->__pyx_n_u_end,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18134, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18134, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18134, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaEventElapsedTime_v2", 0) < (0)) __PYX_ERR(0, 18134, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaEventElapsedTime_v2", 1, 2, 2, i); __PYX_ERR(0, 18134, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18134, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18134, __pyx_L3_error) } __pyx_v_start = values[0]; __pyx_v_end = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaEventElapsedTime_v2", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 18134, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
/* Wrapper error path: drop every argument reference collected in values[] before recording a traceback and returning NULL; the success path below forwards to the impl function __pyx_pf_...144... */
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventElapsedTime_v2", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_144cudaEventElapsedTime_v2(__pyx_self, __pyx_v_start, __pyx_v_end); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_144cudaEventElapsedTime_v2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_start, PyObject *__pyx_v_end) { cudaEvent_t __pyx_v_cyend; PyObject *__pyx_v_pend = NULL; cudaEvent_t __pyx_v_cystart; PyObject *__pyx_v_pstart = NULL; float __pyx_v_ms; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaEventElapsedTime_v2", 0); /* "cuda/bindings/runtime.pyx":18182 * """ * cdef cyruntime.cudaEvent_t cyend * if end is None: # <<<<<<<<<<<<<< * pend = 0 * elif isinstance(end, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_end == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18183 * cdef cyruntime.cudaEvent_t cyend * if end is None: * pend = 0 # <<<<<<<<<<<<<< * elif isinstance(end, (cudaEvent_t,driver.CUevent)): * pend = int(end) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pend = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18182 * """ * cdef cyruntime.cudaEvent_t cyend * if end is None: # <<<<<<<<<<<<<< * pend = 0 * 
elif isinstance(end, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18184 * if end is None: * pend = 0 * elif isinstance(end, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pend = int(end) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_end, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_end, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18185 * pend = 0 * elif isinstance(end, (cudaEvent_t,driver.CUevent)): * pend = int(end) # <<<<<<<<<<<<<< * else: * pend = int(cudaEvent_t(end)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_end); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pend = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":18184 * if end is None: * pend = 0 * elif isinstance(end, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pend = int(end) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18187 * pend = int(end) * else: * pend = int(cudaEvent_t(end)) # <<<<<<<<<<<<<< * cyend = pend * cdef cyruntime.cudaEvent_t cystart */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_end}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18187, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); 
/* Normalize 'end' (and symmetrically 'start' below) to a Python int handle: None -> 0, cudaEvent_t/driver.CUevent -> int(event), anything else is first wrapped in cudaEvent_t and then converted to int. */
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18187, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pend = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18188 * else: * pend = int(cudaEvent_t(end)) * cyend = pend # <<<<<<<<<<<<<< * cdef cyruntime.cudaEvent_t cystart * if start is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pend); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18188, __pyx_L1_error) __pyx_v_cyend = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":18190 * cyend = pend * cdef cyruntime.cudaEvent_t cystart * if start is None: # <<<<<<<<<<<<<< * pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_start == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18191 * cdef cyruntime.cudaEvent_t cystart * if start is None: * pstart = 0 # <<<<<<<<<<<<<< * elif isinstance(start, (cudaEvent_t,driver.CUevent)): * pstart = int(start) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstart = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18190 * cyend = pend * cdef cyruntime.cudaEvent_t cystart * if start is None: # <<<<<<<<<<<<<< * pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":18192 * if start is None: * pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pstart = int(start) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_start, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_start, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18193 
* pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): * pstart = int(start) # <<<<<<<<<<<<<< * else: * pstart = int(cudaEvent_t(start)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_start); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18193, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pstart = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":18192 * if start is None: * pstart = 0 * elif isinstance(start, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pstart = int(start) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":18195 * pstart = int(start) * else: * pstart = int(cudaEvent_t(start)) # <<<<<<<<<<<<<< * cystart = pstart * cdef float ms = 0 */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_start}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18195, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pstart = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":18196 * else: * pstart = int(cudaEvent_t(start)) * cystart = pstart # <<<<<<<<<<<<<< * cdef float ms = 0 * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstart); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18196, __pyx_L1_error) __pyx_v_cystart = 
/* The Python int handles are reinterpreted as raw cudaEvent_t pointer values; the cudaEventElapsedTime_v2 call below runs with the GIL released (Py_UNBLOCK_THREADS / Py_BLOCK_THREADS around the nogil region). */
((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":18197 * pstart = int(cudaEvent_t(start)) * cystart = pstart * cdef float ms = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaEventElapsedTime_v2(&ms, cystart, cyend) */ __pyx_v_ms = 0.0; /* "cuda/bindings/runtime.pyx":18198 * cystart = pstart * cdef float ms = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventElapsedTime_v2(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18199 * cdef float ms = 0 * with nogil: * err = cyruntime.cudaEventElapsedTime_v2(&ms, cystart, cyend) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaEventElapsedTime_v2((&__pyx_v_ms), __pyx_v_cystart, __pyx_v_cyend); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18199, __pyx_L10_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":18198 * cystart = pstart * cdef float ms = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaEventElapsedTime_v2(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L11; } __pyx_L10_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L11:; } } /* "cuda/bindings/runtime.pyx":18200 * with nogil: * err = cyruntime.cudaEventElapsedTime_v2(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ms) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18201 * err = cyruntime.cudaEventElapsedTime_v2(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # 
<<<<<<<<<<<<<< * return (_dict_cudaError_t[err], ms) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18201, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 18201, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 18201, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18200 * with nogil: * err = cyruntime.cudaEventElapsedTime_v2(&ms, cystart, cyend) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ms) */ } /* "cuda/bindings/runtime.pyx":18202 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ms) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18202, __pyx_L1_error) 
/* Success path: build and return the 2-tuple (_dict_cudaError_t[err], ms); the error branch above returned (_dict_cudaError_t[err], None) instead. __pyx_L1_error converts any pending exception into a traceback and NULL return. */
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyFloat_FromDouble(__pyx_v_ms); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 18202, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3) != (0)) __PYX_ERR(0, 18202, __pyx_L1_error); __pyx_t_4 = 0; __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18134 * return (_dict_cudaError_t[err], ms) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaEventElapsedTime_v2(start, end): * """ Computes the elapsed time between events. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaEventElapsedTime_v2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pend); __Pyx_XDECREF(__pyx_v_pstart); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18204 * return (_dict_cudaError_t[err], ms) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaImportExternalMemory(memHandleDesc : Optional[cudaExternalMemoryHandleDesc]): * """ Imports an external memory object. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_147cudaImportExternalMemory(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_146cudaImportExternalMemory, "cudaImportExternalMemory(cudaExternalMemoryHandleDesc memHandleDesc: Optional[cudaExternalMemoryHandleDesc])\n\nImports an external memory object.\n\nImports an externally allocated memory object and returns a handle to\nthat in `extMem_out`.\n\nThe properties of the handle being imported must be described in\n`memHandleDesc`. The :py:obj:`~.cudaExternalMemoryHandleDesc` structure\nis defined as follows:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere :py:obj:`~.cudaExternalMemoryHandleDesc.type` specifies the type\nof handle being imported. :py:obj:`~.cudaExternalMemoryHandleType` is\ndefined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nIf :py:obj:`~.cudaExternalMemoryHandleDesc.type` is\n:py:obj:`~.cudaExternalMemoryHandleTypeOpaqueFd`, then\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::fd must be a valid\nfile descriptor referencing a memory object. Ownership of the file\ndescriptor is transferred to the CUDA driver when the handle is\nimported successfully. Performing any operations on the file descriptor\nafter it is imported results in undefined behavior.\n\nIf :py:obj:`~.cudaExternalMemoryHandleDesc.type` is\n:py:obj:`~.cudaExternalMemoryHandleTypeOpaqueWin32`, then exactly one\nof :py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle and\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name must not\nbe NULL. If\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle is not\nNULL, then it must represent a valid shared NT handle that references a\nmemory object. 
Ownership of this handle is not transferred to CUDA\nafter the import operation, so the application must release the handle\nusing the appropriate system call. If\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name is not\nNULL, then it must point to a NULL-terminated array of UTF-16\ncharacters that refers to a memory object.\n\nIf :py:obj:`~.cudaExternalMemoryHandleDesc.type` is\n:py:obj:`~.cudaExternalMemoryH""andleTypeOpaqueWin32Kmt`, then\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle must be\nnon-NULL and\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name must be\nNULL. The handle specified must be a globally shared KMT handle. This\nhandle does not hold a reference to the underlying object, and thus\nwill be invalid when all references to the memory object are destroyed.\n\nIf :py:obj:`~.cudaExternalMemoryHandleDesc.type` is\n:py:obj:`~.cudaExternalMemoryHandleTypeD3D12Heap`, then exactly one of\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle and\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name must not\nbe NULL. If\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle is not\nNULL, then it must represent a valid shared NT handle that is returned\nby ID3D12Device::CreateSharedHandle when referring to a ID3D12Heap\nobject. This handle holds a reference to the underlying object. If\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name is not\nNULL, then it must point to a NULL-terminated array of UTF-16\ncharacters that refers to a ID3D12Heap object.\n\nIf :py:obj:`~.cudaExternalMemoryHandleDesc.type` is\n:py:obj:`~.cudaExternalMemoryHandleTypeD3D12Resource`, then exactly one\nof :py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle and\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name must not\nbe NULL. 
If\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle is not\nNULL, then it must represent a valid shared NT handle that is returned\nby ID3D12Device::CreateSharedHandle when referring to a ID3D12Resource\nobject. This handle holds a reference to the underlying object. If\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name is not\nNULL, then it must point to a NULL-terminated array of UTF-16\ncharacters that refers to a ID3D12Resource object.\n\nIf :py:obj:`~.cudaExternalMemoryHandleDesc.type` is\n:py:obj:`~.cudaExternalMemoryHandleTypeD3D11Re""source`,then exactly one\nof :py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle and\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name must not\nbe NULL. If\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle is\nnot NULL, then it must represent a valid shared NT handle that is\nreturned by IDXGIResource1::CreateSharedHandle when referring to a\nID3D11Resource object. If\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name is not\nNULL, then it must point to a NULL-terminated array of UTF-16\ncharacters that refers to a ID3D11Resource object.\n\nIf :py:obj:`~.cudaExternalMemoryHandleDesc.type` is\n:py:obj:`~.cudaExternalMemoryHandleTypeD3D11ResourceKmt`, then\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::handle must be\nnon-NULL and\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::win32::name must be\nNULL. The handle specified must be a valid shared KMT handle that is\nreturned by IDXGIResource::GetSharedHandle when referring to a\nID3D11Resource object.\n\nIf :py:obj:`~.cudaExternalMemoryHandleDesc.type` is\n:py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, then\n:py:obj:`~.cudaExternalMemoryHandleDesc`::handle::nvSciBufObject must\nbe NON-NULL and reference a valid NvSciBuf object. 
If the NvSciBuf\nobject imported into CUDA is also mapped by other drivers, then the\napplication must use :py:obj:`~.cudaWaitExternalSemaphoresAsync` or\n:py:obj:`~.cudaSignalExternalSemaphoresAsync` as approprriate barriers\nto maintain coherence between CUDA and the other drivers. See\n:py:obj:`~.cudaExternalSemaphoreWaitSkipNvSciBufMemSync` and\n:py:obj:`~.cudaExternalSemaphoreSignalSkipNvSciBufMemSync` for memory\nsynchronization.\n\nThe size of the memory object must be specified in\n:py:obj:`~.cudaExternalMemoryHandleDesc.size`.\n\nSpecifying the flag :py:obj:`~.cudaExternalMemoryDedicated` in\n:py:obj:`~.cudaExternalMemoryHandleDesc.flags` indicates that the\nresource is a dedicated resource. The definition of what a"" dedicated\nresource is outside the scope of this extension. This flag must be set\nif :py:obj:`~.cudaExternalMemoryHandleDesc.type` is one of the\nfollowing: :py:obj:`~.cudaExternalMemoryHandleTypeD3D12Resource`\n:py:obj:`~.cudaExternalMemoryHandleTypeD3D11Resource`\n:py:obj:`~.cudaExternalMemoryHandleTypeD3D11ResourceKmt`\n\nParameters\n----------\nmemHandleDesc : :py:obj:`~.cudaExternalMemoryHandleDesc`\n Memory import handle descriptor\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorOperatingSystem`\nextMem_out : :py:obj:`~.cudaExternalMemory_t`\n Returned handle to an external memory object\n\nSee Also\n--------\n:py:obj:`~.cudaDestroyExternalMemory`, :py:obj:`~.cudaExternalMemoryGetMappedBuffer`, :py:obj:`~.cudaExternalMemoryGetMappedMipmappedArray`\n\nNotes\n-----\nIf the Vulkan memory imported into CUDA is mapped on the CPU then the application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges as well as appropriate Vulkan pipeline barriers to maintain coherence between CPU and GPU. 
For more information on these APIs, please refer to \"Synchronization\nand Cache Control\" chapter from Vulkan specification."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_147cudaImportExternalMemory = {"cudaImportExternalMemory", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_147cudaImportExternalMemory, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_146cudaImportExternalMemory}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_147cudaImportExternalMemory(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemoryHandleDesc *__pyx_v_memHandleDesc = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaImportExternalMemory (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_memHandleDesc,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18204, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18204, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaImportExternalMemory", 0) < (0)) __PYX_ERR(0, 18204, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaImportExternalMemory", 1, 1, 1, i); __PYX_ERR(0, 18204, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18204, __pyx_L3_error) } __pyx_v_memHandleDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemoryHandleDesc *)values[0]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaImportExternalMemory", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18204, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaImportExternalMemory", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_memHandleDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemoryHandleDesc, 1, "memHandleDesc", 0))) __PYX_ERR(0, 18205, __pyx_L1_error) __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_146cudaImportExternalMemory(__pyx_self, __pyx_v_memHandleDesc); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_146cudaImportExternalMemory(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemoryHandleDesc *__pyx_v_memHandleDesc) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemory_t *__pyx_v_extMem_out = 0; struct cudaExternalMemoryHandleDesc *__pyx_v_cymemHandleDesc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; struct cudaExternalMemoryHandleDesc *__pyx_t_5; int __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaImportExternalMemory", 0); /* "cuda/bindings/runtime.pyx":18347 * and Cache Control" chapter from Vulkan specification. 
* """ * cdef cudaExternalMemory_t extMem_out = cudaExternalMemory_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaExternalMemoryHandleDesc* cymemHandleDesc_ptr = memHandleDesc._pvt_ptr if memHandleDesc is not None else NULL * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18347, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_extMem_out = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemory_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":18348 * """ * cdef cudaExternalMemory_t extMem_out = cudaExternalMemory_t() * cdef cyruntime.cudaExternalMemoryHandleDesc* cymemHandleDesc_ptr = memHandleDesc._pvt_ptr if memHandleDesc is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaImportExternalMemory(extMem_out._pvt_ptr, cymemHandleDesc_ptr) */ __pyx_t_6 = (((PyObject *)__pyx_v_memHandleDesc) != Py_None); if (__pyx_t_6) { __pyx_t_5 = __pyx_v_memHandleDesc->_pvt_ptr; } else { __pyx_t_5 = NULL; } __pyx_v_cymemHandleDesc_ptr = __pyx_t_5; /* "cuda/bindings/runtime.pyx":18349 * cdef cudaExternalMemory_t extMem_out = cudaExternalMemory_t() * cdef cyruntime.cudaExternalMemoryHandleDesc* cymemHandleDesc_ptr = memHandleDesc._pvt_ptr if memHandleDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaImportExternalMemory(extMem_out._pvt_ptr, cymemHandleDesc_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { 
/* "cuda/bindings/runtime.pyx":18350 * cdef cyruntime.cudaExternalMemoryHandleDesc* cymemHandleDesc_ptr = memHandleDesc._pvt_ptr if memHandleDesc is not None else NULL * with nogil: * err = cyruntime.cudaImportExternalMemory(extMem_out._pvt_ptr, cymemHandleDesc_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaImportExternalMemory(((cudaExternalMemory_t *)__pyx_v_extMem_out->_pvt_ptr), __pyx_v_cymemHandleDesc_ptr); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18350, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":18349 * cdef cudaExternalMemory_t extMem_out = cudaExternalMemory_t() * cdef cyruntime.cudaExternalMemoryHandleDesc* cymemHandleDesc_ptr = memHandleDesc._pvt_ptr if memHandleDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaImportExternalMemory(extMem_out._pvt_ptr, cymemHandleDesc_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":18351 * with nogil: * err = cyruntime.cudaImportExternalMemory(extMem_out._pvt_ptr, cymemHandleDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], extMem_out) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":18352 * err = cyruntime.cudaImportExternalMemory(extMem_out._pvt_ptr, cymemHandleDesc_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], extMem_out) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 18352, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18352, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18352, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18352, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 18352, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 18352, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18351 * with nogil: * err = cyruntime.cudaImportExternalMemory(extMem_out._pvt_ptr, cymemHandleDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], extMem_out) */ } /* "cuda/bindings/runtime.pyx":18353 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], extMem_out) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 18353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 18353, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_extMem_out); __Pyx_GIVEREF((PyObject *)__pyx_v_extMem_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_extMem_out)) != (0)) __PYX_ERR(0, 18353, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18204 * return (_dict_cudaError_t[err], ms) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaImportExternalMemory(memHandleDesc : Optional[cudaExternalMemoryHandleDesc]): * """ Imports an external memory object. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaImportExternalMemory", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_extMem_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18355 * return (_dict_cudaError_t[err], extMem_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaExternalMemoryGetMappedBuffer(extMem, bufferDesc : Optional[cudaExternalMemoryBufferDesc]): * """ Maps a buffer onto an imported memory object. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_149cudaExternalMemoryGetMappedBuffer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_148cudaExternalMemoryGetMappedBuffer, "cudaExternalMemoryGetMappedBuffer(extMem, cudaExternalMemoryBufferDesc bufferDesc: Optional[cudaExternalMemoryBufferDesc])\n\nMaps a buffer onto an imported memory object.\n\nMaps a buffer onto an imported memory object and returns a device\npointer in `devPtr`.\n\nThe properties of the buffer being mapped must be described in\n`bufferDesc`. The :py:obj:`~.cudaExternalMemoryBufferDesc` structure is\ndefined as follows:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere :py:obj:`~.cudaExternalMemoryBufferDesc.offset` is the offset in\nthe memory object where the buffer's base address is.\n:py:obj:`~.cudaExternalMemoryBufferDesc.size` is the size of the\nbuffer. :py:obj:`~.cudaExternalMemoryBufferDesc.flags` must be zero.\n\nThe offset and size have to be suitably aligned to match the\nrequirements of the external API. Mapping two buffers whose ranges\noverlap may or may not result in the same virtual address being\nreturned for the overlapped portion. In such cases, the application\nmust ensure that all accesses to that region from the GPU are volatile.\nOtherwise writes made via one address are not guaranteed to be visible\nvia the other address, even if they're issued by the same thread. 
It is\nrecommended that applications map the combined range instead of mapping\nseparate buffers and then apply the appropriate offsets to the returned\npointer to derive the individual buffers.\n\nThe returned pointer `devPtr` must be freed using :py:obj:`~.cudaFree`.\n\nParameters\n----------\nextMem : :py:obj:`~.cudaExternalMemory_t`\n Handle to external memory object\nbufferDesc : :py:obj:`~.cudaExternalMemoryBufferDesc`\n Buffer descriptor\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\ndevPtr : Any\n Returned device pointer to buffer\n\nSee Also\n--------\n:py:obj:`~.cudaImportExternalMemory`, :py:obj:`~.cudaDestroyExternalMemory`, :py:obj:`~.cudaExternal""MemoryGetMappedMipmappedArray`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_149cudaExternalMemoryGetMappedBuffer = {"cudaExternalMemoryGetMappedBuffer", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_149cudaExternalMemoryGetMappedBuffer, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_148cudaExternalMemoryGetMappedBuffer}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_149cudaExternalMemoryGetMappedBuffer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_extMem = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemoryBufferDesc *__pyx_v_bufferDesc = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaExternalMemoryGetMappedBuffer (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = 
PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_extMem,&__pyx_mstate_global->__pyx_n_u_bufferDesc,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18355, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18355, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18355, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaExternalMemoryGetMappedBuffer", 0) < (0)) __PYX_ERR(0, 18355, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaExternalMemoryGetMappedBuffer", 1, 2, 2, i); __PYX_ERR(0, 18355, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18355, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18355, __pyx_L3_error) } __pyx_v_extMem = values[0]; __pyx_v_bufferDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemoryBufferDesc *)values[1]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaExternalMemoryGetMappedBuffer", 1, 2, 2, 
__pyx_nargs); __PYX_ERR(0, 18355, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaExternalMemoryGetMappedBuffer", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_bufferDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemoryBufferDesc, 1, "bufferDesc", 0))) __PYX_ERR(0, 18356, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_148cudaExternalMemoryGetMappedBuffer(__pyx_self, __pyx_v_extMem, __pyx_v_bufferDesc); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_148cudaExternalMemoryGetMappedBuffer(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_extMem, struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemoryBufferDesc *__pyx_v_bufferDesc) { cudaExternalMemory_t __pyx_v_cyextMem; PyObject *__pyx_v_pextMem = NULL; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_devPtr; struct cudaExternalMemoryBufferDesc *__pyx_v_cybufferDesc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; struct cudaExternalMemoryBufferDesc 
*__pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaExternalMemoryGetMappedBuffer", 0); /* "cuda/bindings/runtime.pyx":18405 * """ * cdef cyruntime.cudaExternalMemory_t cyextMem * if extMem is None: # <<<<<<<<<<<<<< * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): */ __pyx_t_1 = (__pyx_v_extMem == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18406 * cdef cyruntime.cudaExternalMemory_t cyextMem * if extMem is None: * pextMem = 0 # <<<<<<<<<<<<<< * elif isinstance(extMem, (cudaExternalMemory_t,)): * pextMem = int(extMem) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pextMem = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18405 * """ * cdef cyruntime.cudaExternalMemory_t cyextMem * if extMem is None: # <<<<<<<<<<<<<< * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18407 * if extMem is None: * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): # <<<<<<<<<<<<<< * pextMem = int(extMem) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_extMem, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18408 * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): * pextMem = int(extMem) # <<<<<<<<<<<<<< * else: * pextMem = int(cudaExternalMemory_t(extMem)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_extMem); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pextMem = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":18407 * if extMem is None: * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): # <<<<<<<<<<<<<< * pextMem = int(extMem) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18410 * pextMem = int(extMem) * else: * pextMem = int(cudaExternalMemory_t(extMem)) # 
<<<<<<<<<<<<<< * cyextMem = pextMem * cdef void_ptr devPtr = 0 */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_extMem}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18410, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18410, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pextMem = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18411 * else: * pextMem = int(cudaExternalMemory_t(extMem)) * cyextMem = pextMem # <<<<<<<<<<<<<< * cdef void_ptr devPtr = 0 * cdef cyruntime.cudaExternalMemoryBufferDesc* cybufferDesc_ptr = bufferDesc._pvt_ptr if bufferDesc is not None else NULL */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pextMem); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18411, __pyx_L1_error) __pyx_v_cyextMem = ((cudaExternalMemory_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":18412 * pextMem = int(cudaExternalMemory_t(extMem)) * cyextMem = pextMem * cdef void_ptr devPtr = 0 # <<<<<<<<<<<<<< * cdef cyruntime.cudaExternalMemoryBufferDesc* cybufferDesc_ptr = bufferDesc._pvt_ptr if bufferDesc is not None else NULL * with nogil: */ __pyx_v_devPtr = 0; /* "cuda/bindings/runtime.pyx":18413 * cyextMem = pextMem * cdef void_ptr devPtr = 0 * cdef cyruntime.cudaExternalMemoryBufferDesc* cybufferDesc_ptr = 
bufferDesc._pvt_ptr if bufferDesc is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaExternalMemoryGetMappedBuffer(&devPtr, cyextMem, cybufferDesc_ptr) */ __pyx_t_1 = (((PyObject *)__pyx_v_bufferDesc) != Py_None); if (__pyx_t_1) { __pyx_t_7 = __pyx_v_bufferDesc->_pvt_ptr; } else { __pyx_t_7 = NULL; } __pyx_v_cybufferDesc_ptr = __pyx_t_7; /* "cuda/bindings/runtime.pyx":18414 * cdef void_ptr devPtr = 0 * cdef cyruntime.cudaExternalMemoryBufferDesc* cybufferDesc_ptr = bufferDesc._pvt_ptr if bufferDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaExternalMemoryGetMappedBuffer(&devPtr, cyextMem, cybufferDesc_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18415 * cdef cyruntime.cudaExternalMemoryBufferDesc* cybufferDesc_ptr = bufferDesc._pvt_ptr if bufferDesc is not None else NULL * with nogil: * err = cyruntime.cudaExternalMemoryGetMappedBuffer(&devPtr, cyextMem, cybufferDesc_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaExternalMemoryGetMappedBuffer(((void **)(&__pyx_v_devPtr)), __pyx_v_cyextMem, __pyx_v_cybufferDesc_ptr); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18415, __pyx_L5_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":18414 * cdef void_ptr devPtr = 0 * cdef cyruntime.cudaExternalMemoryBufferDesc* cybufferDesc_ptr = bufferDesc._pvt_ptr if bufferDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaExternalMemoryGetMappedBuffer(&devPtr, cyextMem, cybufferDesc_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; 
} __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":18416 * with nogil: * err = cyruntime.cudaExternalMemoryGetMappedBuffer(&devPtr, cyextMem, cybufferDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18417 * err = cyruntime.cudaExternalMemoryGetMappedBuffer(&devPtr, cyextMem, cybufferDesc_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], devPtr) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18417, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 18417, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None) != (0)) __PYX_ERR(0, 18417, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18416 * with nogil: * err = cyruntime.cudaExternalMemoryGetMappedBuffer(&devPtr, cyextMem, cybufferDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) */ } /* "cuda/bindings/runtime.pyx":18418 * if err != cyruntime.cudaSuccess: * return 
(_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_devPtr); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 18418, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3) != (0)) __PYX_ERR(0, 18418, __pyx_L1_error); __pyx_t_4 = 0; __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18355 * return (_dict_cudaError_t[err], extMem_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaExternalMemoryGetMappedBuffer(extMem, bufferDesc : Optional[cudaExternalMemoryBufferDesc]): * """ Maps a buffer onto an imported memory object. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaExternalMemoryGetMappedBuffer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pextMem); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18420 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaExternalMemoryGetMappedMipmappedArray(extMem, mipmapDesc : Optional[cudaExternalMemoryMipmappedArrayDesc]): * """ Maps a CUDA mipmapped array onto an external memory object. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_151cudaExternalMemoryGetMappedMipmappedArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_150cudaExternalMemoryGetMappedMipmappedArray, "cudaExternalMemoryGetMappedMipmappedArray(extMem, cudaExternalMemoryMipmappedArrayDesc mipmapDesc: Optional[cudaExternalMemoryMipmappedArrayDesc])\n\nMaps a CUDA mipmapped array onto an external memory object.\n\nMaps a CUDA mipmapped array onto an external object and returns a\nhandle to it in `mipmap`.\n\nThe properties of the CUDA mipmapped array being mapped must be\ndescribed in `mipmapDesc`. The structure\n:py:obj:`~.cudaExternalMemoryMipmappedArrayDesc` is defined as follows:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere :py:obj:`~.cudaExternalMemoryMipmappedArrayDesc.offset` is the\noffset in the memory object where the base level of the mipmap chain\nis. 
:py:obj:`~.cudaExternalMemoryMipmappedArrayDesc.formatDesc`\ndescribes the format of the data.\n:py:obj:`~.cudaExternalMemoryMipmappedArrayDesc.extent` specifies the\ndimensions of the base level of the mipmap chain.\n:py:obj:`~.cudaExternalMemoryMipmappedArrayDesc.flags` are flags\nassociated with CUDA mipmapped arrays. For further details, please\nrefer to the documentation for :py:obj:`~.cudaMalloc3DArray`. Note that\nif the mipmapped array is bound as a color target in the graphics API,\nthen the flag :py:obj:`~.cudaArrayColorAttachment` must be specified in\n:py:obj:`~.cudaExternalMemoryMipmappedArrayDesc.flags`.\n:py:obj:`~.cudaExternalMemoryMipmappedArrayDesc.numLevels` specifies\nthe total number of levels in the mipmap chain.\n\nThe returned CUDA mipmapped array must be freed using\n:py:obj:`~.cudaFreeMipmappedArray`.\n\nParameters\n----------\nextMem : :py:obj:`~.cudaExternalMemory_t`\n Handle to external memory object\nmipmapDesc : :py:obj:`~.cudaExternalMemoryMipmappedArrayDesc`\n CUDA array descriptor\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\nmipmap : :py:obj:`~.cudaMipmappedArray_t`\n Returned CUDA mipmapped array\n\nSee Also\n--------\n:py:obj:`~.cudaImportExte""rnalMemory`, :py:obj:`~.cudaDestroyExternalMemory`, :py:obj:`~.cudaExternalMemoryGetMappedBuffer`\n\nNotes\n-----\nIf :py:obj:`~.cudaExternalMemoryHandleDesc.type` is :py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`, then :py:obj:`~.cudaExternalMemoryMipmappedArrayDesc.numLevels` must not be greater than 1."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_151cudaExternalMemoryGetMappedMipmappedArray = {"cudaExternalMemoryGetMappedMipmappedArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_151cudaExternalMemoryGetMappedMipmappedArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, 
/* NOTE(review): Cython-generated CPython entry point for
 * cudaExternalMemoryGetMappedMipmappedArray(extMem, mipmapDesc).
 * Closes the PyMethodDef table entry above, then defines the METH_FASTCALL
 * wrapper that unpacks exactly two positional/keyword arguments into
 * values[0..1] and dispatches to the __pyx_pf_..._150 implementation.
 * Machine-generated code: regenerate from cuda/bindings/runtime.pyx
 * instead of hand-editing. */
__pyx_doc_4cuda_8bindings_7runtime_150cudaExternalMemoryGetMappedMipmappedArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_151cudaExternalMemoryGetMappedMipmappedArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_extMem = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemoryMipmappedArrayDesc *__pyx_v_mipmapDesc = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaExternalMemoryGetMappedMipmappedArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_extMem,&__pyx_mstate_global->__pyx_n_u_mipmapDesc,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
/* Keyword path: merge positional and keyword args via __Pyx_ParseKeywords,
 * then verify both required arguments were supplied. Error exits funnel
 * through __pyx_L3_error, which XDECREFs any argument refs taken so far. */
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18420, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18420, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18420, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaExternalMemoryGetMappedMipmappedArray", 0) < (0)) __PYX_ERR(0, 18420, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaExternalMemoryGetMappedMipmappedArray", 1, 2, 2, i); __PYX_ERR(0, 18420, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18420, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18420, __pyx_L3_error) } __pyx_v_extMem = values[0]; __pyx_v_mipmapDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemoryMipmappedArrayDesc *)values[1]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaExternalMemoryGetMappedMipmappedArray", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 18420, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaExternalMemoryGetMappedMipmappedArray", 
/* NOTE(review): Cython-generated code. The statements up to the first
 * `return __pyx_r; }` are the error/exit labels of the __pyx_pw_..._151
 * wrapper (traceback, cleanup of argument refs, ArgTypeTest on mipmapDesc,
 * dispatch). They are followed by the __pyx_pf_..._150 implementation of
 * cuda.bindings.runtime.cudaExternalMemoryGetMappedMipmappedArray, whose
 * flow (echoed from the .pyx in the inline comments below) is:
 *   1. normalize `extMem` to a Python int handle: None -> 0,
 *      cudaExternalMemory_t -> int(extMem), otherwise
 *      int(cudaExternalMemory_t(extMem));
 *   2. convert that int to the C cudaExternalMemory_t value;
 *   3. allocate a fresh cudaMipmappedArray_t wrapper object;
 *   4. call cyruntime.cudaExternalMemoryGetMappedMipmappedArray with the
 *      GIL released (Py_UNBLOCK_THREADS / Py_BLOCK_THREADS pair);
 *   5. return (_dict_cudaError_t[err], mipmap), with None in place of
 *      mipmap when err != cudaSuccess.
 * Machine-generated: regenerate from cuda/bindings/runtime.pyx rather than
 * hand-editing; statement order is refcount- and GIL-sensitive. */
__pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_mipmapDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemoryMipmappedArrayDesc, 1, "mipmapDesc", 0))) __PYX_ERR(0, 18421, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_150cudaExternalMemoryGetMappedMipmappedArray(__pyx_self, __pyx_v_extMem, __pyx_v_mipmapDesc); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_150cudaExternalMemoryGetMappedMipmappedArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_extMem, struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalMemoryMipmappedArrayDesc *__pyx_v_mipmapDesc) { cudaExternalMemory_t __pyx_v_cyextMem; PyObject *__pyx_v_pextMem = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMipmappedArray_t *__pyx_v_mipmap = 0; struct cudaExternalMemoryMipmappedArrayDesc *__pyx_v_cymipmapDesc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; struct cudaExternalMemoryMipmappedArrayDesc *__pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaExternalMemoryGetMappedMipmappedArray", 0); /* "cuda/bindings/runtime.pyx":18474 * """ * cdef cyruntime.cudaExternalMemory_t 
cyextMem * if extMem is None: # <<<<<<<<<<<<<< * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): */ __pyx_t_1 = (__pyx_v_extMem == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18475 * cdef cyruntime.cudaExternalMemory_t cyextMem * if extMem is None: * pextMem = 0 # <<<<<<<<<<<<<< * elif isinstance(extMem, (cudaExternalMemory_t,)): * pextMem = int(extMem) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pextMem = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18474 * """ * cdef cyruntime.cudaExternalMemory_t cyextMem * if extMem is None: # <<<<<<<<<<<<<< * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18476 * if extMem is None: * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): # <<<<<<<<<<<<<< * pextMem = int(extMem) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_extMem, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18477 * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): * pextMem = int(extMem) # <<<<<<<<<<<<<< * else: * pextMem = int(cudaExternalMemory_t(extMem)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_extMem); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18477, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pextMem = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":18476 * if extMem is None: * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): # <<<<<<<<<<<<<< * pextMem = int(extMem) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18479 * pextMem = int(extMem) * else: * pextMem = int(cudaExternalMemory_t(extMem)) # <<<<<<<<<<<<<< * cyextMem = pextMem * cdef cudaMipmappedArray_t mipmap = cudaMipmappedArray_t() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); __pyx_t_4 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_extMem}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18479, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pextMem = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18480 * else: * pextMem = int(cudaExternalMemory_t(extMem)) * cyextMem = pextMem # <<<<<<<<<<<<<< * cdef cudaMipmappedArray_t mipmap = cudaMipmappedArray_t() * cdef cyruntime.cudaExternalMemoryMipmappedArrayDesc* cymipmapDesc_ptr = mipmapDesc._pvt_ptr if mipmapDesc is not None else NULL */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pextMem); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18480, __pyx_L1_error) __pyx_v_cyextMem = ((cudaExternalMemory_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":18481 * pextMem = int(cudaExternalMemory_t(extMem)) * cyextMem = pextMem * cdef cudaMipmappedArray_t mipmap = cudaMipmappedArray_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaExternalMemoryMipmappedArrayDesc* cymipmapDesc_ptr = mipmapDesc._pvt_ptr if mipmapDesc is not None else NULL * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = 
__Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18481, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_mipmap = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMipmappedArray_t *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":18482 * cyextMem = pextMem * cdef cudaMipmappedArray_t mipmap = cudaMipmappedArray_t() * cdef cyruntime.cudaExternalMemoryMipmappedArrayDesc* cymipmapDesc_ptr = mipmapDesc._pvt_ptr if mipmapDesc is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaExternalMemoryGetMappedMipmappedArray(mipmap._pvt_ptr, cyextMem, cymipmapDesc_ptr) */ __pyx_t_1 = (((PyObject *)__pyx_v_mipmapDesc) != Py_None); if (__pyx_t_1) { __pyx_t_7 = __pyx_v_mipmapDesc->_pvt_ptr; } else { __pyx_t_7 = NULL; } __pyx_v_cymipmapDesc_ptr = __pyx_t_7; /* "cuda/bindings/runtime.pyx":18483 * cdef cudaMipmappedArray_t mipmap = cudaMipmappedArray_t() * cdef cyruntime.cudaExternalMemoryMipmappedArrayDesc* cymipmapDesc_ptr = mipmapDesc._pvt_ptr if mipmapDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaExternalMemoryGetMappedMipmappedArray(mipmap._pvt_ptr, cyextMem, cymipmapDesc_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18484 * cdef cyruntime.cudaExternalMemoryMipmappedArrayDesc* cymipmapDesc_ptr = mipmapDesc._pvt_ptr if mipmapDesc is not None else NULL * with nogil: * err = cyruntime.cudaExternalMemoryGetMappedMipmappedArray(mipmap._pvt_ptr, cyextMem, cymipmapDesc_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaExternalMemoryGetMappedMipmappedArray(((cudaMipmappedArray_t 
*)__pyx_v_mipmap->_pvt_ptr), __pyx_v_cyextMem, __pyx_v_cymipmapDesc_ptr); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18484, __pyx_L5_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":18483 * cdef cudaMipmappedArray_t mipmap = cudaMipmappedArray_t() * cdef cyruntime.cudaExternalMemoryMipmappedArrayDesc* cymipmapDesc_ptr = mipmapDesc._pvt_ptr if mipmapDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaExternalMemoryGetMappedMipmappedArray(mipmap._pvt_ptr, cyextMem, cymipmapDesc_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":18485 * with nogil: * err = cyruntime.cudaExternalMemoryGetMappedMipmappedArray(mipmap._pvt_ptr, cyextMem, cymipmapDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], mipmap) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18486 * err = cyruntime.cudaExternalMemoryGetMappedMipmappedArray(mipmap._pvt_ptr, cyextMem, cymipmapDesc_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], mipmap) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 18486, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 18486, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18485 * with nogil: * err = cyruntime.cudaExternalMemoryGetMappedMipmappedArray(mipmap._pvt_ptr, cyextMem, cymipmapDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], mipmap) */ } /* "cuda/bindings/runtime.pyx":18487 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], mipmap) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 18487, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_mipmap); __Pyx_GIVEREF((PyObject *)__pyx_v_mipmap); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_mipmap)) != (0)) __PYX_ERR(0, 18487, __pyx_L1_error); 
__pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18420 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaExternalMemoryGetMappedMipmappedArray(extMem, mipmapDesc : Optional[cudaExternalMemoryMipmappedArrayDesc]): * """ Maps a CUDA mipmapped array onto an external memory object. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaExternalMemoryGetMappedMipmappedArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pextMem); __Pyx_XDECREF((PyObject *)__pyx_v_mipmap); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18489 * return (_dict_cudaError_t[err], mipmap) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDestroyExternalMemory(extMem): * """ Destroys an external memory object. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_153cudaDestroyExternalMemory(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_152cudaDestroyExternalMemory, "cudaDestroyExternalMemory(extMem)\n\nDestroys an external memory object.\n\nDestroys the specified external memory object. 
Any existing buffers and\nCUDA mipmapped arrays mapped onto this object must no longer be used\nand must be explicitly freed using :py:obj:`~.cudaFree` and\n:py:obj:`~.cudaFreeMipmappedArray` respectively.\n\nParameters\n----------\nextMem : :py:obj:`~.cudaExternalMemory_t`\n External memory object to be destroyed\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaImportExternalMemory`, :py:obj:`~.cudaExternalMemoryGetMappedBuffer`, :py:obj:`~.cudaExternalMemoryGetMappedMipmappedArray`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_153cudaDestroyExternalMemory = {"cudaDestroyExternalMemory", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_153cudaDestroyExternalMemory, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_152cudaDestroyExternalMemory}; 
/* NOTE(review): Cython-generated CPython wrapper for
 * cudaDestroyExternalMemory(extMem): parses the single positional/keyword
 * argument (errors funnel through __pyx_L3_error, which XDECREFs any
 * argument refs taken) and forwards to the __pyx_pf_..._152 implementation.
 * Machine-generated: regenerate from cuda/bindings/runtime.pyx rather than
 * hand-editing. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_153cudaDestroyExternalMemory(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_extMem = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDestroyExternalMemory (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_extMem,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18489, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18489, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDestroyExternalMemory", 0) < (0)) __PYX_ERR(0, 18489, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDestroyExternalMemory", 1, 1, 1, i); __PYX_ERR(0, 18489, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18489, __pyx_L3_error) } __pyx_v_extMem = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDestroyExternalMemory", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18489, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDestroyExternalMemory", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_152cudaDestroyExternalMemory(__pyx_self, __pyx_v_extMem); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
/* NOTE(review): Cython-generated implementation of
 * cuda.bindings.runtime.cudaDestroyExternalMemory (the `static PyObject`
 * return type is the final tokens of the previous physical line). Flow,
 * mirroring the .pyx echoed in the inline comments below:
 *   1. normalize `extMem` to a Python int handle: None -> 0,
 *      cudaExternalMemory_t -> int(extMem), otherwise
 *      int(cudaExternalMemory_t(extMem));
 *   2. convert it to the C cudaExternalMemory_t value;
 *   3. call cyruntime.cudaDestroyExternalMemory with the GIL released;
 *   4. return the 1-tuple (_dict_cudaError_t[err],).
 * Machine-generated: regenerate from cuda/bindings/runtime.pyx rather than
 * hand-editing; statement order is refcount- and GIL-sensitive. */
*__pyx_pf_4cuda_8bindings_7runtime_152cudaDestroyExternalMemory(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_extMem) { cudaExternalMemory_t __pyx_v_cyextMem; PyObject *__pyx_v_pextMem = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDestroyExternalMemory", 0); /* "cuda/bindings/runtime.pyx":18513 * """ * cdef cyruntime.cudaExternalMemory_t cyextMem * if extMem is None: # <<<<<<<<<<<<<< * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): */ __pyx_t_1 = (__pyx_v_extMem == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18514 * cdef cyruntime.cudaExternalMemory_t cyextMem * if extMem is None: * pextMem = 0 # <<<<<<<<<<<<<< * elif isinstance(extMem, (cudaExternalMemory_t,)): * pextMem = int(extMem) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pextMem = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18513 * """ * cdef cyruntime.cudaExternalMemory_t cyextMem * if extMem is None: # <<<<<<<<<<<<<< * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18515 * if extMem is None: * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): # <<<<<<<<<<<<<< * pextMem = int(extMem) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_extMem, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18516 * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): * pextMem = int(extMem) # <<<<<<<<<<<<<< * else: * pextMem = int(cudaExternalMemory_t(extMem)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_extMem); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 18516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pextMem = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":18515 * if extMem is None: * pextMem = 0 * elif isinstance(extMem, (cudaExternalMemory_t,)): # <<<<<<<<<<<<<< * pextMem = int(extMem) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18518 * pextMem = int(extMem) * else: * pextMem = int(cudaExternalMemory_t(extMem)) # <<<<<<<<<<<<<< * cyextMem = pextMem * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalMemory_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_extMem}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18518, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18518, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pextMem = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18519 * else: * pextMem = int(cudaExternalMemory_t(extMem)) * cyextMem = pextMem # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDestroyExternalMemory(cyextMem) */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pextMem); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18519, __pyx_L1_error) __pyx_v_cyextMem = ((cudaExternalMemory_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":18520 * pextMem = int(cudaExternalMemory_t(extMem)) * cyextMem = pextMem * with nogil: # <<<<<<<<<<<<<< 
* err = cyruntime.cudaDestroyExternalMemory(cyextMem) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18521 * cyextMem = pextMem * with nogil: * err = cyruntime.cudaDestroyExternalMemory(cyextMem) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDestroyExternalMemory(__pyx_v_cyextMem); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18521, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":18520 * pextMem = int(cudaExternalMemory_t(extMem)) * cyextMem = pextMem * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDestroyExternalMemory(cyextMem) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":18522 * with nogil: * err = cyruntime.cudaDestroyExternalMemory(cyextMem) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18522, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18522, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18522, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18522, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, 
__pyx_t_3) != (0)) __PYX_ERR(0, 18522, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18489 * return (_dict_cudaError_t[err], mipmap) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDestroyExternalMemory(extMem): * """ Destroys an external memory object. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDestroyExternalMemory", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pextMem); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18524 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaImportExternalSemaphore(semHandleDesc : Optional[cudaExternalSemaphoreHandleDesc]): * """ Imports an external semaphore. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_155cudaImportExternalSemaphore(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_154cudaImportExternalSemaphore, "cudaImportExternalSemaphore(cudaExternalSemaphoreHandleDesc semHandleDesc: Optional[cudaExternalSemaphoreHandleDesc])\n\nImports an external semaphore.\n\nImports an externally allocated synchronization object and returns a\nhandle to that in `extSem_out`.\n\nThe properties of the handle being imported must be described in\n`semHandleDesc`. 
The :py:obj:`~.cudaExternalSemaphoreHandleDesc` is\ndefined as follows:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` specifies the\ntype of handle being imported.\n:py:obj:`~.cudaExternalSemaphoreHandleType` is defined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nIf :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` is\n:py:obj:`~.cudaExternalSemaphoreHandleTypeOpaqueFd`, then\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::fd must be a valid\nfile descriptor referencing a synchronization object. Ownership of the\nfile descriptor is transferred to the CUDA driver when the handle is\nimported successfully. Performing any operations on the file descriptor\nafter it is imported results in undefined behavior.\n\nIf :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` is\n:py:obj:`~.cudaExternalSemaphoreHandleTypeOpaqueWin32`, then exactly\none of\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle and\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name must\nnot be NULL. If\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle is\nnot NULL, then it must represent a valid shared NT handle that\nreferences a synchronization object. Ownership of this handle is not\ntransferred to CUDA after the import operation, so the application must\nrelease the handle using the appropriate system call. If\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name is not\nNULL, then it must name a valid synchronization object.\n\nIf :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` is\n:py:obj:`~.cudaExt""ernalSemaphoreHandleTypeOpaqueWin32Kmt`, then\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle must\nbe non-NULL and\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name must\nbe NULL. 
The handle specified must be a globally shared KMT handle.\nThis handle does not hold a reference to the underlying object, and\nthus will be invalid when all references to the synchronization object\nare destroyed.\n\nIf :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` is\n:py:obj:`~.cudaExternalSemaphoreHandleTypeD3D12Fence`, then exactly one\nof :py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle\nand :py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name\nmust not be NULL. If\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle is\nnot NULL, then it must represent a valid shared NT handle that is\nreturned by ID3D12Device::CreateSharedHandle when referring to a\nID3D12Fence object. This handle holds a reference to the underlying\nobject. If\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name is not\nNULL, then it must name a valid synchronization object that refers to a\nvalid ID3D12Fence object.\n\nIf :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` is\n:py:obj:`~.cudaExternalSemaphoreHandleTypeD3D11Fence`, then exactly one\nof :py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle\nand :py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name\nmust not be NULL. If\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle is\nnot NULL, then it must represent a valid shared NT handle that is\nreturned by ID3D11Fence::CreateSharedHandle. 
If\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name is not\nNULL, then it must name a valid synchronization object that refers to a\nvalid ID3D11Fence object.\n\nIf :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` is\n:py:obj:`~.cudaExternalSemaphoreHandleTypeNvSciSync`, then\n:py:obj:`~.cudaExternalSemaphoreHandleDesc""`::handle::nvSciSyncObj\nrepresents a valid NvSciSyncObj.\n\n:py:obj:`~.cudaExternalSemaphoreHandleTypeKeyedMutex`, then exactly one\nof :py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle\nand :py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name\nmust not be NULL. If\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle is\nnot NULL, then it represent a valid shared NT handle that is returned\nby IDXGIResource1::CreateSharedHandle when referring to a\nIDXGIKeyedMutex object.\n\nIf :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` is\n:py:obj:`~.cudaExternalSemaphoreHandleTypeKeyedMutexKmt`, then\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle must\nbe non-NULL and\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name must\nbe NULL. The handle specified must represent a valid KMT handle that is\nreturned by IDXGIResource::GetSharedHandle when referring to a\nIDXGIKeyedMutex object.\n\nIf :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` is\n:py:obj:`~.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd`, then\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::fd must be a valid\nfile descriptor referencing a synchronization object. Ownership of the\nfile descriptor is transferred to the CUDA driver when the handle is\nimported successfully. 
Performing any operations on the file descriptor\nafter it is imported results in undefined behavior.\n\nIf :py:obj:`~.cudaExternalSemaphoreHandleDesc.type` is\n:py:obj:`~.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32`, then\nexactly one of\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle and\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name must\nnot be NULL. If\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::handle is\nnot NULL, then it must represent a valid shared NT handle that\nreferences a synchronization object. Ownership of this handle is not\ntransferred to CUDA after the import operation, so ""the application must\nrelease the handle using the appropriate system call. If\n:py:obj:`~.cudaExternalSemaphoreHandleDesc`::handle::win32::name is not\nNULL, then it must name a valid synchronization object.\n\nParameters\n----------\nsemHandleDesc : :py:obj:`~.cudaExternalSemaphoreHandleDesc`\n    Semaphore import handle descriptor\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorOperatingSystem`\nextSem_out : :py:obj:`~.cudaExternalSemaphore_t`\n    Returned handle to an external semaphore\n\nSee Also\n--------\n:py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`");
/* Method-table entry binding the Python-visible name "cudaImportExternalSemaphore"
   to the fastcall wrapper below, together with the docstring above. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_155cudaImportExternalSemaphore = {"cudaImportExternalSemaphore", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_155cudaImportExternalSemaphore, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_154cudaImportExternalSemaphore};
/* Cython-generated Python wrapper for cudaImportExternalSemaphore: unpacks the
   single 'semHandleDesc' argument (positional or keyword), type-checks it
   against the cudaExternalSemaphoreHandleDesc extension type (None allowed),
   and forwards to the implementation __pyx_pf_..._154cudaImportExternalSemaphore.
   NOTE(review): generated code -- do not edit by hand; regenerate from runtime.pyx. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_155cudaImportExternalSemaphore(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphoreHandleDesc *__pyx_v_semHandleDesc = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaImportExternalSemaphore (wrapper)", 0);
/* Under the non-fastcall ABI the argument count must be recovered from the tuple. */
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs);
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_semHandleDesc,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18524, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18524, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaImportExternalSemaphore", 0) < (0)) __PYX_ERR(0, 18524, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaImportExternalSemaphore", 1, 1, 1, i); __PYX_ERR(0, 18524, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18524, __pyx_L3_error) } __pyx_v_semHandleDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphoreHandleDesc *)values[0]); }
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaImportExternalSemaphore", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18524, __pyx_L3_error)
__pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done;
/* Unpacking failed: drop any argument references collected so far and raise. */
__pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaImportExternalSemaphore", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL;
__pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_semHandleDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphoreHandleDesc, 1, "semHandleDesc", 0))) __PYX_ERR(0, 18525, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_154cudaImportExternalSemaphore(__pyx_self, __pyx_v_semHandleDesc);
/* function exit code */
goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of cudaImportExternalSemaphore (compiled from
   "cuda/bindings/runtime.pyx":18524ff); continues on the following lines. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_154cudaImportExternalSemaphore(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphoreHandleDesc *__pyx_v_semHandleDesc) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphore_t *__pyx_v_extSem_out = 0; struct cudaExternalSemaphoreHandleDesc *__pyx_v_cysemHandleDesc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t
__pyx_t_4; /* continues the 'size_t __pyx_t_4;' declaration opened on the previous line */
struct cudaExternalSemaphoreHandleDesc *__pyx_t_5; int __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaImportExternalSemaphore", 0);
/* NOTE(review): __pyx_t_* are Cython temporaries; every failure jumps to
   __pyx_L1_error (next chunk), which XDECREFs them before returning NULL. */
/* "cuda/bindings/runtime.pyx":18662
 * :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`
 * """
 * cdef cudaExternalSemaphore_t extSem_out = cudaExternalSemaphore_t()             # <<<<<<<<<<<<<<
 * cdef cyruntime.cudaExternalSemaphoreHandleDesc* cysemHandleDesc_ptr = semHandleDesc._pvt_ptr if semHandleDesc is not None else NULL
 * with nogil:
 */
/* Allocate the Python-level result wrapper by vectorcalling its extension type. */
__pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphore_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphore_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18662, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_extSem_out = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphore_t *)__pyx_t_1); __pyx_t_1 = 0;
/* "cuda/bindings/runtime.pyx":18663
 * """
 * cdef cudaExternalSemaphore_t extSem_out = cudaExternalSemaphore_t()
 * cdef cyruntime.cudaExternalSemaphoreHandleDesc* cysemHandleDesc_ptr = semHandleDesc._pvt_ptr if semHandleDesc is not None else NULL             # <<<<<<<<<<<<<<
 * with nogil:
 *     err = cyruntime.cudaImportExternalSemaphore(extSem_out._pvt_ptr, cysemHandleDesc_ptr)
 */
/* Borrow the C descriptor pointer from semHandleDesc (NULL when it is None). */
__pyx_t_6 = (((PyObject *)__pyx_v_semHandleDesc) != Py_None); if (__pyx_t_6) { __pyx_t_5 = __pyx_v_semHandleDesc->_pvt_ptr; } else { __pyx_t_5 = NULL; } __pyx_v_cysemHandleDesc_ptr = __pyx_t_5;
/* "cuda/bindings/runtime.pyx":18664
 * cdef cudaExternalSemaphore_t extSem_out = cudaExternalSemaphore_t()
 * cdef cyruntime.cudaExternalSemaphoreHandleDesc* cysemHandleDesc_ptr = semHandleDesc._pvt_ptr if semHandleDesc is not None else NULL
 * with nogil:             # <<<<<<<<<<<<<<
 *     err = cyruntime.cudaImportExternalSemaphore(extSem_out._pvt_ptr, cysemHandleDesc_ptr)
 *     if err != cyruntime.cudaSuccess:
 */
/* 'with nogil:' -- the GIL is released for the cyruntime call and re-acquired
   on both the normal and the error exit paths below. */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ {
/* "cuda/bindings/runtime.pyx":18665
 * cdef cyruntime.cudaExternalSemaphoreHandleDesc* cysemHandleDesc_ptr = semHandleDesc._pvt_ptr if semHandleDesc is not None else NULL
 * with nogil:
 *     err = cyruntime.cudaImportExternalSemaphore(extSem_out._pvt_ptr, cysemHandleDesc_ptr)             # <<<<<<<<<<<<<<
 *     if err != cyruntime.cudaSuccess:
 *         return (_dict_cudaError_t[err], None)
 */
__pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaImportExternalSemaphore(((cudaExternalSemaphore_t *)__pyx_v_extSem_out->_pvt_ptr), __pyx_v_cysemHandleDesc_ptr); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18665, __pyx_L4_error) __pyx_v_err = __pyx_t_7; }
/* "cuda/bindings/runtime.pyx":18664
 * cdef cudaExternalSemaphore_t extSem_out = cudaExternalSemaphore_t()
 * cdef cyruntime.cudaExternalSemaphoreHandleDesc* cysemHandleDesc_ptr = semHandleDesc._pvt_ptr if semHandleDesc is not None else NULL
 * with nogil:             # <<<<<<<<<<<<<<
 *     err = cyruntime.cudaImportExternalSemaphore(extSem_out._pvt_ptr, cysemHandleDesc_ptr)
 *     if err != cyruntime.cudaSuccess:
 */
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } }
/* "cuda/bindings/runtime.pyx":18666
 * with nogil:
 *     err = cyruntime.cudaImportExternalSemaphore(extSem_out._pvt_ptr, cysemHandleDesc_ptr)
 * if err != cyruntime.cudaSuccess:             # <<<<<<<<<<<<<<
 *     return (_dict_cudaError_t[err], None)
 * return (_dict_cudaError_t[err], extSem_out) */
/* Failure: return (error_enum, None) -- translate err through _dict_cudaError_t. */
__pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) {
/* "cuda/bindings/runtime.pyx":18667
 * err = cyruntime.cudaImportExternalSemaphore(extSem_out._pvt_ptr, cysemHandleDesc_ptr)
 * if err != cyruntime.cudaSuccess:
 *     return (_dict_cudaError_t[err], None)             # <<<<<<<<<<<<<<
 * return (_dict_cudaError_t[err], extSem_out)
 *
 */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 18667, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 18667, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":18666
 * with nogil:
 *     err = cyruntime.cudaImportExternalSemaphore(extSem_out._pvt_ptr, cysemHandleDesc_ptr)
 * if err != cyruntime.cudaSuccess:             # <<<<<<<<<<<<<<
 *     return (_dict_cudaError_t[err], None)
 * return (_dict_cudaError_t[err], extSem_out)
 */
}
/* "cuda/bindings/runtime.pyx":18668
 * if err != cyruntime.cudaSuccess:
 *     return (_dict_cudaError_t[err], None)
 * return (_dict_cudaError_t[err], extSem_out)             # <<<<<<<<<<<<<<
 *
 * @cython.embedsignature(True)
 */
/* Success: return (cudaSuccess enum, extSem_out wrapper). */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 18668, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_extSem_out); __Pyx_GIVEREF((PyObject *)__pyx_v_extSem_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_extSem_out)) != (0)) __PYX_ERR(0, 18668, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":18524
 * return (_dict_cudaError_t[err],)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaImportExternalSemaphore(semHandleDesc : Optional[cudaExternalSemaphoreHandleDesc]):
 *     """ Imports an external semaphore.
*/
/* function exit code */
/* Error exit for cudaImportExternalSemaphore: drop temporaries, record the
   traceback; __pyx_L0 is the shared cleanup for both paths. */
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaImportExternalSemaphore", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_extSem_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":18670
 * return (_dict_cudaError_t[err], extSem_out)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaSignalExternalSemaphoresAsync(extSemArray : Optional[tuple[cudaExternalSemaphore_t] | list[cudaExternalSemaphore_t]], paramsArray : Optional[tuple[cudaExternalSemaphoreSignalParams] | list[cudaExternalSemaphoreSignalParams]], unsigned int numExtSems, stream):
 *     """ Signals a set of external semaphore objects.
 */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_157cudaSignalExternalSemaphoresAsync(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Generated docstring for cudaSignalExternalSemaphoresAsync; the string content
   below is Python-visible documentation and must not be altered. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_156cudaSignalExternalSemaphoresAsync, "cudaSignalExternalSemaphoresAsync(extSemArray: Optional[tuple[cudaExternalSemaphore_t] | list[cudaExternalSemaphore_t]], paramsArray: Optional[tuple[cudaExternalSemaphoreSignalParams] | list[cudaExternalSemaphoreSignalParams]], unsigned int numExtSems, stream)\n\nSignals a set of external semaphore objects.\n\nEnqueues a signal operation on a set of externally allocated semaphore\nobject in the specified stream. The operations will be executed when\nall prior operations in the stream complete.\n\nThe exact semantics of signaling a semaphore depends on the type of the\nobject.\n\nIf the semaphore object is any one of the following types:\n:py:obj:`~.cudaExternalSemaphoreHandleTypeOpaqueFd`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeOpaqueWin32`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt` then\nsignaling the semaphore will set it to the signaled state.\n\nIf the semaphore object is any one of the following types:\n:py:obj:`~.cudaExternalSemaphoreHandleTypeD3D12Fence`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeD3D11Fence`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32` then\nthe semaphore will be set to the value specified in\n:py:obj:`~.cudaExternalSemaphoreSignalParams`::params::fence::value.\n\nIf the semaphore object is of the type\n:py:obj:`~.cudaExternalSemaphoreHandleTypeNvSciSync` this API sets\n:py:obj:`~.cudaExternalSemaphoreSignalParams`::params::nvSciSync::fence\nto a value that can be used by subsequent waiters of the same NvSciSync\nobject to order operations with those currently submitted in `stream`.\nSuch an update will overwrite previous contents of\n:py:obj:`~.cudaExternalSemaphoreSignalParams`::params::nvSciSync::fence.\nBy default, signaling such an external semaphore object causes\nappropriate memory synchronization operations to be performed over all\nthe external memory objects that are imported as\n:py:obj:`~.cudaExternalMemoryHandleTypeNvSciB""uf`. This ensures that any\nsubsequent accesses made by other importers of the same set of NvSciBuf\nmemory object(s) are coherent. These operations can be skipped by\nspecifying the flag\n:py:obj:`~.cudaExternalSemaphoreSignalSkipNvSciBufMemSync`, which can\nbe used as a performance optimization when data coherency is not\nrequired. But specifying this flag in scenarios where data coherency is\nrequired results in undefined behavior. Also, for semaphore object of\nthe type :py:obj:`~.cudaExternalSemaphoreHandleTypeNvSciSync`, if the\nNvSciSyncAttrList used to create the NvSciSyncObj had not set the flags\nin :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` to\ncudaNvSciSyncAttrSignal, this API will return cudaErrorNotSupported.\n\n:py:obj:`~.cudaExternalSemaphoreSignalParams`::params::nvSciSync::fence\nassociated with semaphore object of the type\n:py:obj:`~.cudaExternalSemaphoreHandleTypeNvSciSync` can be\ndeterministic. For this the NvSciSyncAttrList used to create the\nsemaphore object must have value of\nNvSciSyncAttrKey_RequireDeterministicFences key set to true.\nDeterministic fences allow users to enqueue a wait over the semaphore\nobject even before corresponding signal is enqueued. For such a\nsemaphore object, CUDA guarantees that each signal operation will\nincrement the fence value by '1'. Users are expected to track count of\nsignals enqueued on the semaphore object and insert waits accordingly.\nWhen such a semaphore object is signaled from multiple streams, due to\nconcurrent stream execution, it is possible that the order in which the\nsemaphore gets signaled is indeterministic. This could lead to waiters\nof the semaphore getting unblocked incorrectly. Users are expected to\nhandle such situations, either by not using the same semaphore object\nwith deterministic fence support enabled in different streams or by\nadding explicit dependency amongst such streams so that the semaphore\nis signaled in order.\n\nIf the semaphore object is any one of the foll""owing types:\n:py:obj:`~.cudaExternalSemaphoreHandleTypeKeyedMutex`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeKeyedMutexKmt`, then the\nkeyed mutex will be released with the key specified in\n:py:obj:`~.cudaExternalSemaphoreSignalParams`::params::keyedmutex::key.\n\nParameters\n----------\nextSemArray : list[:py:obj:`~.cudaExternalSemaphore_t`]\n    Set of external semaphores to be signaled\nparamsArray : list[:py:obj:`~.cudaExternalSemaphoreSignalParams`]\n    Array of semaphore parameters\nnumExtSems : unsigned int\n    Number of semaphores to signal\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n    Stream to enqueue the signal operations in\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`");
/* Method-table entry binding the Python-visible name
   "cudaSignalExternalSemaphoresAsync" to its fastcall wrapper and docstring. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_157cudaSignalExternalSemaphoresAsync = {"cudaSignalExternalSemaphoresAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_157cudaSignalExternalSemaphoresAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_156cudaSignalExternalSemaphoresAsync};
/* Cython-generated Python wrapper: unpacks (extSemArray, paramsArray,
   numExtSems, stream) from positional or keyword arguments, then forwards to
   __pyx_pf_..._156cudaSignalExternalSemaphoresAsync. Generated code -- do not
   edit by hand. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_157cudaSignalExternalSemaphoresAsync(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_extSemArray = 0; PyObject *__pyx_v_paramsArray = 0; unsigned int __pyx_v_numExtSems; PyObject *__pyx_v_stream =
0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaSignalExternalSemaphoresAsync (wrapper)", 0);
/* Under the non-fastcall ABI the argument count must be recovered from the tuple. */
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* Collect the four arguments into values[]; the switch falls through from the
   highest positional count down, then keywords fill the remaining slots. */
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_extSemArray_2,&__pyx_mstate_global->__pyx_n_u_paramsArray_2,&__pyx_mstate_global->__pyx_n_u_numExtSems_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18670, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 18670, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18670, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18670, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18670, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaSignalExternalSemaphoresAsync", 0) < (0)) __PYX_ERR(0, 18670, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaSignalExternalSemaphoresAsync", 1, 4, 4, i); __PYX_ERR(0, 18670, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18670, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18670, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18670, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 18670, __pyx_L3_error) } __pyx_v_extSemArray = values[0]; __pyx_v_paramsArray = values[1]; __pyx_v_numExtSems = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_numExtSems == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18671, __pyx_L3_error) __pyx_v_stream = values[3]; }
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaSignalExternalSemaphoresAsync", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 18670, __pyx_L3_error)
__pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done;
/* Unpacking failed: drop collected argument references and raise. */
__pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaSignalExternalSemaphoresAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL;
__pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_156cudaSignalExternalSemaphoresAsync(__pyx_self, __pyx_v_extSemArray, __pyx_v_paramsArray, __pyx_v_numExtSems, __pyx_v_stream);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_2generator83(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
/* "cuda/bindings/runtime.pyx":18773
 * cystream = pstream
 * paramsArray = [] if paramsArray is None else paramsArray
 * if not all(isinstance(_x, (cudaExternalSemaphoreSignalParams,)) for _x in paramsArray):             # <<<<<<<<<<<<<<
 *     raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or list[cyruntime.cudaExternalSemaphoreSignalParams,]")
 * extSemArray = [] if extSemArray is None else extSemArray
 */
/* Factory for the genexpr closure used in the paramsArray isinstance check:
   builds the scope struct holding the iterable arg, then creates the generator. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_83_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_83_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_83_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_83_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_83_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 18773, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); {
__pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_2generator83, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[83]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaSignalExternalSemaphoresAsyn, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 18773, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; }
/* function exit code */
__pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaSignalExternalSemaphoresAsync.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Generator body for the genexpr feeding all(isinstance(_x,
   (cudaExternalSemaphoreSignalParams,)) for _x in paramsArray) at
   runtime.pyx:18773: iterates the captured sequence and yields early with
   Py_False on the first element failing the type check, Py_True otherwise. */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_2generator83(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_83_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_83_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 18773, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 18773, __pyx_L1_error) }
/* Fast path for exact list/tuple inputs (indexed access); generic iterator otherwise. */
if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18773, __pyx_L1_error) }
for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 18773, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 18773, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; }
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2));
#else
__pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2);
#endif
++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18773, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 18773, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphoreSignalParams); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
/* function exit code */
goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r);
#if !CYTHON_USE_EXC_INFO_STACK
__Pyx_Coroutine_ResetAndClearException(__pyx_generator);
#endif
__pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; }
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_5generator84(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
/* "cuda/bindings/runtime.pyx":18776
 * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or list[cyruntime.cudaExternalSemaphoreSignalParams,]")
 * extSemArray = [] if extSemArray is None else extSemArray
 * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray):             # <<<<<<<<<<<<<<
 *     raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]")
 * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL
 */
/* Factory for the genexpr closure used in the extSemArray isinstance check. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_3genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct
__pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_84_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0);
/* Allocate the closure scope holding the iterable, then create the generator object. */
__pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_84_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_84_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_84_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_84_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 18776, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_5generator84, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[84]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaSignalExternalSemaphoresAsyn, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 18776, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; }
/* function exit code */
__pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaSignalExternalSemaphoresAsync.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Generator body for the genexpr feeding all(isinstance(_x,
   (cudaExternalSemaphore_t,)) for _x in extSemArray) at runtime.pyx:18776;
   mirrors generator83 above but checks against cudaExternalSemaphore_t.
   NOTE(review): this definition continues beyond the visible region. */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_5generator84(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_84_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_84_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 18776, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 18776, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18776, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18776, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 18776, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 18776, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; }
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2));
#else
__pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2);
#endif
++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18776, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 18776, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphore_t); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
/* function exit code */
goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4);
if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18670 * return (_dict_cudaError_t[err], extSem_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaSignalExternalSemaphoresAsync(extSemArray : Optional[tuple[cudaExternalSemaphore_t] | list[cudaExternalSemaphore_t]], paramsArray : Optional[tuple[cudaExternalSemaphoreSignalParams] | list[cudaExternalSemaphoreSignalParams]], unsigned int numExtSems, stream): * """ Signals a set of external semaphore objects. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_156cudaSignalExternalSemaphoresAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_extSemArray, PyObject *__pyx_v_paramsArray, unsigned int __pyx_v_numExtSems, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaExternalSemaphore_t *__pyx_v_cyextSemArray; Py_ssize_t __pyx_v_idx; struct cudaExternalSemaphoreSignalParams *__pyx_v_cyparamsArray; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_2generator83 = 0; PyObject *__pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_5generator84 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaExternalSemaphoreSignalParams *__pyx_t_14; 
cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaSignalExternalSemaphoresAsync", 0); __Pyx_INCREF(__pyx_v_extSemArray); __Pyx_INCREF(__pyx_v_paramsArray); /* "cuda/bindings/runtime.pyx":18765 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18766 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18765 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18767 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18768 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18768, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* 
"cuda/bindings/runtime.pyx":18767 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18770 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * paramsArray = [] if paramsArray is None else paramsArray */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18770, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18770, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18771 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * paramsArray = [] if paramsArray is None else paramsArray * if not all(isinstance(_x, (cudaExternalSemaphoreSignalParams,)) for _x in paramsArray): */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18771, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":18772 * pstream = int(cudaStream_t(stream)) * cystream = pstream * paramsArray = [] if paramsArray is None else paramsArray # 
<<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaExternalSemaphoreSignalParams,)) for _x in paramsArray): * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or list[cyruntime.cudaExternalSemaphoreSignalParams,]") */ __pyx_t_1 = (__pyx_v_paramsArray == Py_None); if (__pyx_t_1) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18772, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_paramsArray); __pyx_t_5 = __pyx_v_paramsArray; } __Pyx_DECREF_SET(__pyx_v_paramsArray, __pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":18773 * cystream = pstream * paramsArray = [] if paramsArray is None else paramsArray * if not all(isinstance(_x, (cudaExternalSemaphoreSignalParams,)) for _x in paramsArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or list[cyruntime.cudaExternalSemaphoreSignalParams,]") * extSemArray = [] if extSemArray is None else extSemArray */ __pyx_t_5 = __pyx_pf_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_genexpr(NULL, __pyx_v_paramsArray); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 18773, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = (!__pyx_t_1); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":18774 * paramsArray = [] if paramsArray is None else paramsArray * if not all(isinstance(_x, (cudaExternalSemaphoreSignalParams,)) for _x in paramsArray): * raise TypeError("Argument 'paramsArray' is not instance of type (expected 
tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or list[cyruntime.cudaExternalSemaphoreSignalParams,]") # <<<<<<<<<<<<<< * extSemArray = [] if extSemArray is None else extSemArray * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_4 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_Argument_paramsArray_is_not_inst}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 18774, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":18773 * cystream = pstream * paramsArray = [] if paramsArray is None else paramsArray * if not all(isinstance(_x, (cudaExternalSemaphoreSignalParams,)) for _x in paramsArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or list[cyruntime.cudaExternalSemaphoreSignalParams,]") * extSemArray = [] if extSemArray is None else extSemArray */ } /* "cuda/bindings/runtime.pyx":18775 * if not all(isinstance(_x, (cudaExternalSemaphoreSignalParams,)) for _x in paramsArray): * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or list[cyruntime.cudaExternalSemaphoreSignalParams,]") * extSemArray = [] if extSemArray is None else extSemArray # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or 
list[cyruntime.cudaExternalSemaphore_t,]") */ __pyx_t_2 = (__pyx_v_extSemArray == Py_None); if (__pyx_t_2) { __pyx_t_4 = PyList_New(0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18775, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; } else { __Pyx_INCREF(__pyx_v_extSemArray); __pyx_t_3 = __pyx_v_extSemArray; } __Pyx_DECREF_SET(__pyx_v_extSemArray, __pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":18776 * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or list[cyruntime.cudaExternalSemaphoreSignalParams,]") * extSemArray = [] if extSemArray is None else extSemArray * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL */ __pyx_t_3 = __pyx_pf_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_3genexpr(NULL, __pyx_v_extSemArray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18776, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_Generator_GetInlinedResult(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18776, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18776, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = (!__pyx_t_2); if (unlikely(__pyx_t_1)) { /* "cuda/bindings/runtime.pyx":18777 * extSemArray = [] if extSemArray is None else extSemArray * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") # <<<<<<<<<<<<<< * cdef 
cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL * if len(extSemArray) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Argument_extSemArray_is_not_inst}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18777, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":18776 * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreSignalParams,] or list[cyruntime.cudaExternalSemaphoreSignalParams,]") * extSemArray = [] if extSemArray is None else extSemArray * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL */ } /* "cuda/bindings/runtime.pyx":18778 * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL # <<<<<<<<<<<<<< * if len(extSemArray) > 1: * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) */ __pyx_v_cyextSemArray = NULL; /* "cuda/bindings/runtime.pyx":18779 * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or 
list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL * if len(extSemArray) > 1: # <<<<<<<<<<<<<< * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18779, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 > 1); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18780 * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL * if len(extSemArray) > 1: * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) # <<<<<<<<<<<<<< * if cyextSemArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18780, __pyx_L1_error) __pyx_v_cyextSemArray = ((cudaExternalSemaphore_t *)calloc(__pyx_t_8, (sizeof(cudaExternalSemaphore_t)))); /* "cuda/bindings/runtime.pyx":18781 * if len(extSemArray) > 1: * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) * else: */ __pyx_t_1 = (__pyx_v_cyextSemArray == NULL); if (unlikely(__pyx_t_1)) { /* "cuda/bindings/runtime.pyx":18782 * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(extSemArray)): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = 
PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18782, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaExternalSemaphore_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18782, __pyx_L1_error) /* 
"cuda/bindings/runtime.pyx":18781 * if len(extSemArray) > 1: * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) * else: */ } /* "cuda/bindings/runtime.pyx":18784 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) * else: * for idx in range(len(extSemArray)): # <<<<<<<<<<<<<< * cyextSemArray[idx] = (extSemArray[idx])._pvt_ptr[0] * elif len(extSemArray) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18784, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":18785 * else: * for idx in range(len(extSemArray)): * cyextSemArray[idx] = (extSemArray[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(extSemArray) == 1: * cyextSemArray = (extSemArray[0])._pvt_ptr */ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_extSemArray, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18785, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); (__pyx_v_cyextSemArray[__pyx_v_idx]) = ((cudaExternalSemaphore_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphore_t *)__pyx_t_4)->_pvt_ptr[0])); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } /* "cuda/bindings/runtime.pyx":18779 * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL * if len(extSemArray) > 1: # <<<<<<<<<<<<<< * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: */ 
goto __pyx_L8; } /* "cuda/bindings/runtime.pyx":18786 * for idx in range(len(extSemArray)): * cyextSemArray[idx] = (extSemArray[idx])._pvt_ptr[0] * elif len(extSemArray) == 1: # <<<<<<<<<<<<<< * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreSignalParams* cyparamsArray = NULL */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18786, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 == 1); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18787 * cyextSemArray[idx] = (extSemArray[idx])._pvt_ptr[0] * elif len(extSemArray) == 1: * cyextSemArray = (extSemArray[0])._pvt_ptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaExternalSemaphoreSignalParams* cyparamsArray = NULL * if len(paramsArray) > 1: */ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_extSemArray, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18787, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_cyextSemArray = ((cudaExternalSemaphore_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphore_t *)__pyx_t_4)->_pvt_ptr); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":18786 * for idx in range(len(extSemArray)): * cyextSemArray[idx] = (extSemArray[idx])._pvt_ptr[0] * elif len(extSemArray) == 1: # <<<<<<<<<<<<<< * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreSignalParams* cyparamsArray = NULL */ } __pyx_L8:; /* "cuda/bindings/runtime.pyx":18788 * elif len(extSemArray) == 1: * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreSignalParams* cyparamsArray = NULL # <<<<<<<<<<<<<< * if len(paramsArray) > 1: * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) */ __pyx_v_cyparamsArray = NULL; /* "cuda/bindings/runtime.pyx":18789 * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreSignalParams* cyparamsArray = NULL * if len(paramsArray) > 1: # 
<<<<<<<<<<<<<< * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) * if cyparamsArray is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18789, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 > 1); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18790 * cdef cyruntime.cudaExternalSemaphoreSignalParams* cyparamsArray = NULL * if len(paramsArray) > 1: * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) # <<<<<<<<<<<<<< * if cyparamsArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreSignalParams))) */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18790, __pyx_L1_error) __pyx_v_cyparamsArray = ((struct cudaExternalSemaphoreSignalParams *)calloc(__pyx_t_8, (sizeof(struct cudaExternalSemaphoreSignalParams)))); /* "cuda/bindings/runtime.pyx":18791 * if len(paramsArray) > 1: * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) * if cyparamsArray is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreSignalParams))) * for idx in range(len(paramsArray)): */ __pyx_t_1 = (__pyx_v_cyparamsArray == NULL); if (unlikely(__pyx_t_1)) { /* "cuda/bindings/runtime.pyx":18792 * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) * if cyparamsArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreSignalParams))) # <<<<<<<<<<<<<< * for idx in range(len(paramsArray)): * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) */ __pyx_t_3 
= NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_9 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18792, __pyx_L1_error) __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_FromSize_t((sizeof(struct cudaExternalSemaphoreSignalParams))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18792, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
} __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18792, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":18791 * if len(paramsArray) > 1: * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) * if cyparamsArray is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreSignalParams))) * for idx in range(len(paramsArray)): */ } /* "cuda/bindings/runtime.pyx":18793 * if cyparamsArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreSignalParams))) * for idx in range(len(paramsArray)): # <<<<<<<<<<<<<< * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) * elif len(paramsArray) == 1: */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18793, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":18794 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreSignalParams))) * for idx in range(len(paramsArray)): * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) # <<<<<<<<<<<<<< * elif len(paramsArray) == 1: * cyparamsArray = (paramsArray[0])._pvt_ptr */ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_paramsArray, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); (void)(memcpy((&(__pyx_v_cyparamsArray[__pyx_v_idx])), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphoreSignalParams *)__pyx_t_4)->_pvt_ptr, (sizeof(struct 
cudaExternalSemaphoreSignalParams)))); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } /* "cuda/bindings/runtime.pyx":18789 * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreSignalParams* cyparamsArray = NULL * if len(paramsArray) > 1: # <<<<<<<<<<<<<< * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) * if cyparamsArray is NULL: */ goto __pyx_L12; } /* "cuda/bindings/runtime.pyx":18795 * for idx in range(len(paramsArray)): * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) * elif len(paramsArray) == 1: # <<<<<<<<<<<<<< * cyparamsArray = (paramsArray[0])._pvt_ptr * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18795, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 == 1); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18796 * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) * elif len(paramsArray) == 1: * cyparamsArray = (paramsArray[0])._pvt_ptr # <<<<<<<<<<<<<< * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) */ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_paramsArray, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_14 = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphoreSignalParams *)__pyx_t_4)->_pvt_ptr; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cyparamsArray = __pyx_t_14; /* "cuda/bindings/runtime.pyx":18795 * for idx in range(len(paramsArray)): * 
string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreSignalParams)) * elif len(paramsArray) == 1: # <<<<<<<<<<<<<< * cyparamsArray = (paramsArray[0])._pvt_ptr * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) */ } __pyx_L12:; /* "cuda/bindings/runtime.pyx":18797 * elif len(paramsArray) == 1: * cyparamsArray = (paramsArray[0])._pvt_ptr * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) # <<<<<<<<<<<<<< * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) * with nogil: */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18797, __pyx_L1_error) __pyx_t_1 = (__pyx_v_numExtSems > __pyx_t_8); if (unlikely(__pyx_t_1)) { __pyx_t_9 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_5 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18797, __pyx_L1_error) __pyx_t_3 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_3, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = 
__Pyx_PyLong_From_unsigned_int(__pyx_v_numExtSems); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_9, __pyx_t_3}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18797, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18797, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":18798 * cyparamsArray = (paramsArray[0])._pvt_ptr * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaSignalExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18798, __pyx_L1_error) __pyx_t_1 = (__pyx_v_numExtSems > __pyx_t_8); if (unlikely(__pyx_t_1)) { __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_3 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18798, __pyx_L1_error) 
__pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_From_unsigned_int(__pyx_v_numExtSems); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18798, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":18799 * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " 
+ str(numExtSems)) * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaSignalExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if len(extSemArray) > 1 and cyextSemArray is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18800 * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) * with nogil: * err = cyruntime.cudaSignalExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) # <<<<<<<<<<<<<< * if len(extSemArray) > 1 and cyextSemArray is not NULL: * free(cyextSemArray) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaSignalExternalSemaphoresAsync(__pyx_v_cyextSemArray, __pyx_v_cyparamsArray, __pyx_v_numExtSems, __pyx_v_cystream); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18800, __pyx_L19_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":18799 * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaSignalExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if len(extSemArray) > 1 and cyextSemArray is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L20; } __pyx_L19_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L20:; } } /* "cuda/bindings/runtime.pyx":18801 * with nogil: * err = cyruntime.cudaSignalExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if 
len(extSemArray) > 1 and cyextSemArray is not NULL: # <<<<<<<<<<<<<< * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18801, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L22_bool_binop_done; } __pyx_t_2 = (__pyx_v_cyextSemArray != NULL); __pyx_t_1 = __pyx_t_2; __pyx_L22_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18802 * err = cyruntime.cudaSignalExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if len(extSemArray) > 1 and cyextSemArray is not NULL: * free(cyextSemArray) # <<<<<<<<<<<<<< * if len(paramsArray) > 1 and cyparamsArray is not NULL: * free(cyparamsArray) */ free(__pyx_v_cyextSemArray); /* "cuda/bindings/runtime.pyx":18801 * with nogil: * err = cyruntime.cudaSignalExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if len(extSemArray) > 1 and cyextSemArray is not NULL: # <<<<<<<<<<<<<< * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: */ } /* "cuda/bindings/runtime.pyx":18803 * if len(extSemArray) > 1 and cyextSemArray is not NULL: * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: # <<<<<<<<<<<<<< * free(cyparamsArray) * return (_dict_cudaError_t[err],) */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18803, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L25_bool_binop_done; } __pyx_t_2 = (__pyx_v_cyparamsArray != NULL); __pyx_t_1 = __pyx_t_2; __pyx_L25_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18804 * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: * free(cyparamsArray) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ free(__pyx_v_cyparamsArray); 
/* "cuda/bindings/runtime.pyx":18803 * if len(extSemArray) > 1 and cyextSemArray is not NULL: * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: # <<<<<<<<<<<<<< * free(cyparamsArray) * return (_dict_cudaError_t[err],) */ } /* "cuda/bindings/runtime.pyx":18805 * if len(paramsArray) > 1 and cyparamsArray is not NULL: * free(cyparamsArray) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18805, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18805, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18805, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18805, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 18805, __pyx_L1_error); __pyx_t_9 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18670 * return (_dict_cudaError_t[err], extSem_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaSignalExternalSemaphoresAsync(extSemArray : Optional[tuple[cudaExternalSemaphore_t] | list[cudaExternalSemaphore_t]], paramsArray : Optional[tuple[cudaExternalSemaphoreSignalParams] | list[cudaExternalSemaphoreSignalParams]], unsigned int numExtSems, stream): * """ Signals a set of external semaphore objects. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaSignalExternalSemaphoresAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_2generator83); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_33cudaSignalExternalSemaphoresAsync_5generator84); __Pyx_XDECREF(__pyx_v_extSemArray); __Pyx_XDECREF(__pyx_v_paramsArray); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18807 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaWaitExternalSemaphoresAsync(extSemArray : Optional[tuple[cudaExternalSemaphore_t] | list[cudaExternalSemaphore_t]], paramsArray : Optional[tuple[cudaExternalSemaphoreWaitParams] | list[cudaExternalSemaphoreWaitParams]], unsigned int numExtSems, stream): * """ Waits on a set of external semaphore objects. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_159cudaWaitExternalSemaphoresAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_158cudaWaitExternalSemaphoresAsync, "cudaWaitExternalSemaphoresAsync(extSemArray: Optional[tuple[cudaExternalSemaphore_t] | list[cudaExternalSemaphore_t]], paramsArray: Optional[tuple[cudaExternalSemaphoreWaitParams] | list[cudaExternalSemaphoreWaitParams]], unsigned int numExtSems, stream)\n\nWaits on a set of external semaphore objects.\n\nEnqueues a wait operation on a set of externally allocated semaphore\nobject in the specified stream. 
The operations will be executed when\nall prior operations in the stream complete.\n\nThe exact semantics of waiting on a semaphore depends on the type of\nthe object.\n\nIf the semaphore object is any one of the following types:\n:py:obj:`~.cudaExternalSemaphoreHandleTypeOpaqueFd`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeOpaqueWin32`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt` then waiting\non the semaphore will wait until the semaphore reaches the signaled\nstate. The semaphore will then be reset to the unsignaled state.\nTherefore for every signal operation, there can only be one wait\noperation.\n\nIf the semaphore object is any one of the following types:\n:py:obj:`~.cudaExternalSemaphoreHandleTypeD3D12Fence`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeD3D11Fence`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeTimelineSemaphoreFd`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeTimelineSemaphoreWin32` then\nwaiting on the semaphore will wait until the value of the semaphore is\ngreater than or equal to\n:py:obj:`~.cudaExternalSemaphoreWaitParams`::params::fence::value.\n\nIf the semaphore object is of the type\n:py:obj:`~.cudaExternalSemaphoreHandleTypeNvSciSync` then, waiting on\nthe semaphore will wait until the\n:py:obj:`~.cudaExternalSemaphoreSignalParams`::params::nvSciSync::fence\nis signaled by the signaler of the NvSciSyncObj that was associated\nwith this semaphore object. By default, waiting on such an external\nsemaphore object causes appropriate memory synchronization operations\nto be performed over all external memory objects that are"" imported as\n:py:obj:`~.cudaExternalMemoryHandleTypeNvSciBuf`. This ensures that any\nsubsequent accesses made by other importers of the same set of NvSciBuf\nmemory object(s) are coherent. 
These operations can be skipped by\nspecifying the flag\n:py:obj:`~.cudaExternalSemaphoreWaitSkipNvSciBufMemSync`, which can be\nused as a performance optimization when data coherency is not required.\nBut specifying this flag in scenarios where data coherency is required\nresults in undefined behavior. Also, for semaphore object of the type\n:py:obj:`~.cudaExternalSemaphoreHandleTypeNvSciSync`, if the\nNvSciSyncAttrList used to create the NvSciSyncObj had not set the flags\nin :py:obj:`~.cudaDeviceGetNvSciSyncAttributes` to\ncudaNvSciSyncAttrWait, this API will return cudaErrorNotSupported.\n\nIf the semaphore object is any one of the following types:\n:py:obj:`~.cudaExternalSemaphoreHandleTypeKeyedMutex`,\n:py:obj:`~.cudaExternalSemaphoreHandleTypeKeyedMutexKmt`, then the\nkeyed mutex will be acquired when it is released with the key specified\nin\n:py:obj:`~.cudaExternalSemaphoreSignalParams`::params::keyedmutex::key\nor until the timeout specified by\n:py:obj:`~.cudaExternalSemaphoreSignalParams`::params::keyedmutex::timeoutMs\nhas lapsed. The timeout interval can either be a finite value specified\nin milliseconds or an infinite value. In case an infinite value is\nspecified the timeout never elapses. 
The windows INFINITE macro must be\nused to specify infinite timeout\n\nParameters\n----------\nextSemArray : list[:py:obj:`~.cudaExternalSemaphore_t`]\n External semaphores to be waited on\nparamsArray : list[:py:obj:`~.cudaExternalSemaphoreWaitParams`]\n Array of semaphore parameters\nnumExtSems : unsigned int\n Number of semaphores to wait on\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream to enqueue the wait operations in\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle` :py:obj:`~.cuda""ErrorTimeout`\n\nSee Also\n--------\n:py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaDestroyExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_159cudaWaitExternalSemaphoresAsync = {"cudaWaitExternalSemaphoresAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_159cudaWaitExternalSemaphoresAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_158cudaWaitExternalSemaphoresAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_159cudaWaitExternalSemaphoresAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_extSemArray = 0; PyObject *__pyx_v_paramsArray = 0; unsigned int __pyx_v_numExtSems; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaWaitExternalSemaphoresAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = 
PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_extSemArray_2,&__pyx_mstate_global->__pyx_n_u_paramsArray_2,&__pyx_mstate_global->__pyx_n_u_numExtSems_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18807, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 18807, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18807, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18807, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18807, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaWaitExternalSemaphoresAsync", 0) < (0)) __PYX_ERR(0, 18807, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaWaitExternalSemaphoresAsync", 1, 4, 4, i); __PYX_ERR(0, 18807, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18807, __pyx_L3_error) values[1] = 
__Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18807, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 18807, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 18807, __pyx_L3_error) } __pyx_v_extSemArray = values[0]; __pyx_v_paramsArray = values[1]; __pyx_v_numExtSems = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_numExtSems == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 18808, __pyx_L3_error) __pyx_v_stream = values[3]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaWaitExternalSemaphoresAsync", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 18807, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaWaitExternalSemaphoresAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_158cudaWaitExternalSemaphoresAsync(__pyx_self, __pyx_v_extSemArray, __pyx_v_paramsArray, __pyx_v_numExtSems, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_2generator85(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":18899 * cystream = pstream * paramsArray = [] if paramsArray is None else paramsArray * if not 
all(isinstance(_x, (cudaExternalSemaphoreWaitParams,)) for _x in paramsArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or list[cyruntime.cudaExternalSemaphoreWaitParams,]") * extSemArray = [] if extSemArray is None else extSemArray */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_85_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_85_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_85_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_85_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_85_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 18899, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_2generator85, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[85]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaWaitExternalSemaphoresAsync, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 18899, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); 
__Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaWaitExternalSemaphoresAsync.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_2generator85(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_85_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_85_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 18899, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 18899, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18899, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18899, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 18899, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 18899, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18899, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 18899, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphoreWaitParams); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
/* NOTE(review): Cython-generated C -- do not hand-edit; regenerate from
 * cuda/bindings/runtime.pyx instead.  The statements immediately below are the
 * shared error/exit tail (__pyx_L0) of the PRECEDING generator body, whose head
 * lies outside this chunk: it converts a pending exception into StopIteration
 * semantics, releases the return value reference, marks the coroutine finished
 * (resume_label = -1) and clears its closure.  After that tail come two complete
 * units: the genexpr "creator" __pyx_pf_..._3genexpr and the generator body
 * ..._5generator86, which together implement the inlined
 * all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray)
 * from runtime.pyx:18902. */
__Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_5generator86(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":18902 * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or list[cyruntime.cudaExternalSemaphoreWaitParams,]") * extSemArray = [] if extSemArray is None else extSemArray * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_3genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_86_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_86_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_86_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_86_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if
/* The `if` above/below is the creator's scope-allocation failure check: on
 * failure the scope pointer is parked on Py_None (so the shared __pyx_L1_error
 * cleanup can unconditionally DECREF it) and the error path is taken.  On
 * success the creator stores the iterable argument into the closure scope and
 * hands it to __Pyx_Generator_New together with the generator body
 * ..._5generator86; the new generator object is returned to the caller. */
(unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_86_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 18902, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_5generator86, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[86]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaWaitExternalSemaphoresAsync, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 18902, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaWaitExternalSemaphoresAsync.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_5generator86(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_86_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_86_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations
/* Generator body proper.  Resume-label dispatch: only resume_label 0 (first
 * run) is valid for this single-shot inlined genexpr; anything else returns
 * NULL.  It then takes a fast path for exact list/tuple closures (direct
 * indexed access, __pyx_t_2 as the running index) or falls back to a generic
 * iterator via PyObject_GetIter + the iterator's tp_iternext (__pyx_t_3). */
__Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 18902, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 18902, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18902, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18902, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 18902, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 18902, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18902, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if
/* The `if` above/below distinguishes normal iterator exhaustion (StopIteration
 * is cleared, loop breaks) from a real error (propagated to __pyx_L1_error).
 * Inside the loop each item is type-checked against cudaExternalSemaphore_t;
 * the first mismatch short-circuits the inlined all(...) by returning Py_False,
 * otherwise the loop's else-clause returns Py_True.  The trailing tail mirrors
 * the one at the top of this block; the /* comment it opens at the end is
 * closed on the next line, which begins the (here-incomplete) implementation
 * of cudaWaitExternalSemaphoresAsync itself. */
(unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 18902, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphore_t); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18807 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaWaitExternalSemaphoresAsync(extSemArray : Optional[tuple[cudaExternalSemaphore_t] | list[cudaExternalSemaphore_t]], paramsArray : Optional[tuple[cudaExternalSemaphoreWaitParams] | list[cudaExternalSemaphoreWaitParams]], unsigned int numExtSems, stream): * """ Waits on a set of external semaphore objects. 
*/ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_158cudaWaitExternalSemaphoresAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_extSemArray, PyObject *__pyx_v_paramsArray, unsigned int __pyx_v_numExtSems, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaExternalSemaphore_t *__pyx_v_cyextSemArray; Py_ssize_t __pyx_v_idx; struct cudaExternalSemaphoreWaitParams *__pyx_v_cyparamsArray; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_2generator85 = 0; PyObject *__pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_5generator86 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaExternalSemaphoreWaitParams *__pyx_t_14; cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaWaitExternalSemaphoresAsync", 0); __Pyx_INCREF(__pyx_v_extSemArray); __Pyx_INCREF(__pyx_v_paramsArray); /* "cuda/bindings/runtime.pyx":18891 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18892 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18891 * """ * cdef cyruntime.cudaStream_t cystream 
* if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18893 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18894 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":18893 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18896 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * paramsArray = [] if paramsArray is None else paramsArray */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); 
__pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18896, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18897 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * paramsArray = [] if paramsArray is None else paramsArray * if not all(isinstance(_x, (cudaExternalSemaphoreWaitParams,)) for _x in paramsArray): */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18897, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":18898 * pstream = int(cudaStream_t(stream)) * cystream = pstream * paramsArray = [] if paramsArray is None else paramsArray # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaExternalSemaphoreWaitParams,)) for _x in paramsArray): * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or list[cyruntime.cudaExternalSemaphoreWaitParams,]") */ __pyx_t_1 = (__pyx_v_paramsArray == Py_None); if (__pyx_t_1) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_paramsArray); __pyx_t_5 = __pyx_v_paramsArray; } __Pyx_DECREF_SET(__pyx_v_paramsArray, __pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":18899 * cystream = pstream * paramsArray = [] if paramsArray is None else paramsArray * if not all(isinstance(_x, (cudaExternalSemaphoreWaitParams,)) for _x in paramsArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'paramsArray' is not 
instance of type (expected tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or list[cyruntime.cudaExternalSemaphoreWaitParams,]") * extSemArray = [] if extSemArray is None else extSemArray */ __pyx_t_5 = __pyx_pf_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_genexpr(NULL, __pyx_v_paramsArray); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18899, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18899, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 18899, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = (!__pyx_t_1); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":18900 * paramsArray = [] if paramsArray is None else paramsArray * if not all(isinstance(_x, (cudaExternalSemaphoreWaitParams,)) for _x in paramsArray): * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or list[cyruntime.cudaExternalSemaphoreWaitParams,]") # <<<<<<<<<<<<<< * extSemArray = [] if extSemArray is None else extSemArray * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_4 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_Argument_paramsArray_is_not_inst_2}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18900, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 18900, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":18899 * cystream = 
pstream * paramsArray = [] if paramsArray is None else paramsArray * if not all(isinstance(_x, (cudaExternalSemaphoreWaitParams,)) for _x in paramsArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or list[cyruntime.cudaExternalSemaphoreWaitParams,]") * extSemArray = [] if extSemArray is None else extSemArray */ } /* "cuda/bindings/runtime.pyx":18901 * if not all(isinstance(_x, (cudaExternalSemaphoreWaitParams,)) for _x in paramsArray): * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or list[cyruntime.cudaExternalSemaphoreWaitParams,]") * extSemArray = [] if extSemArray is None else extSemArray # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") */ __pyx_t_2 = (__pyx_v_extSemArray == Py_None); if (__pyx_t_2) { __pyx_t_4 = PyList_New(0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; } else { __Pyx_INCREF(__pyx_v_extSemArray); __pyx_t_3 = __pyx_v_extSemArray; } __Pyx_DECREF_SET(__pyx_v_extSemArray, __pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":18902 * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or list[cyruntime.cudaExternalSemaphoreWaitParams,]") * extSemArray = [] if extSemArray is None else extSemArray * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = 
NULL */ __pyx_t_3 = __pyx_pf_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_3genexpr(NULL, __pyx_v_extSemArray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18902, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_Generator_GetInlinedResult(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18902, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18902, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = (!__pyx_t_2); if (unlikely(__pyx_t_1)) { /* "cuda/bindings/runtime.pyx":18903 * extSemArray = [] if extSemArray is None else extSemArray * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL * if len(extSemArray) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Argument_extSemArray_is_not_inst}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18903, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18903, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":18902 * raise TypeError("Argument 'paramsArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphoreWaitParams,] or list[cyruntime.cudaExternalSemaphoreWaitParams,]") * extSemArray = [] if extSemArray is None else extSemArray * if not all(isinstance(_x, 
(cudaExternalSemaphore_t,)) for _x in extSemArray): # <<<<<<<<<<<<<< * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL */ } /* "cuda/bindings/runtime.pyx":18904 * if not all(isinstance(_x, (cudaExternalSemaphore_t,)) for _x in extSemArray): * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL # <<<<<<<<<<<<<< * if len(extSemArray) > 1: * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) */ __pyx_v_cyextSemArray = NULL; /* "cuda/bindings/runtime.pyx":18905 * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL * if len(extSemArray) > 1: # <<<<<<<<<<<<<< * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18905, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 > 1); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18906 * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL * if len(extSemArray) > 1: * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) # <<<<<<<<<<<<<< * if cyextSemArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18906, __pyx_L1_error) __pyx_v_cyextSemArray = ((cudaExternalSemaphore_t 
*)calloc(__pyx_t_8, (sizeof(cudaExternalSemaphore_t)))); /* "cuda/bindings/runtime.pyx":18907 * if len(extSemArray) > 1: * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) * else: */ __pyx_t_1 = (__pyx_v_cyextSemArray == NULL); if (unlikely(__pyx_t_1)) { /* "cuda/bindings/runtime.pyx":18908 * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(extSemArray)): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18908, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaExternalSemaphore_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); 
__pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18908, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":18907 * if len(extSemArray) > 1: * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) * else: */ } /* "cuda/bindings/runtime.pyx":18910 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(extSemArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphore_t))) * else: * for idx in range(len(extSemArray)): # <<<<<<<<<<<<<< * cyextSemArray[idx] = (extSemArray[idx])._pvt_ptr[0] * elif len(extSemArray) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18910, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":18911 * else: * for idx in range(len(extSemArray)): * cyextSemArray[idx] = 
(extSemArray[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(extSemArray) == 1: * cyextSemArray = (extSemArray[0])._pvt_ptr */ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_extSemArray, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18911, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); (__pyx_v_cyextSemArray[__pyx_v_idx]) = ((cudaExternalSemaphore_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphore_t *)__pyx_t_4)->_pvt_ptr[0])); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } /* "cuda/bindings/runtime.pyx":18905 * raise TypeError("Argument 'extSemArray' is not instance of type (expected tuple[cyruntime.cudaExternalSemaphore_t,] or list[cyruntime.cudaExternalSemaphore_t,]") * cdef cyruntime.cudaExternalSemaphore_t* cyextSemArray = NULL * if len(extSemArray) > 1: # <<<<<<<<<<<<<< * cyextSemArray = calloc(len(extSemArray), sizeof(cyruntime.cudaExternalSemaphore_t)) * if cyextSemArray is NULL: */ goto __pyx_L8; } /* "cuda/bindings/runtime.pyx":18912 * for idx in range(len(extSemArray)): * cyextSemArray[idx] = (extSemArray[idx])._pvt_ptr[0] * elif len(extSemArray) == 1: # <<<<<<<<<<<<<< * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreWaitParams* cyparamsArray = NULL */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18912, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 == 1); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18913 * cyextSemArray[idx] = (extSemArray[idx])._pvt_ptr[0] * elif len(extSemArray) == 1: * cyextSemArray = (extSemArray[0])._pvt_ptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaExternalSemaphoreWaitParams* cyparamsArray = NULL * if len(paramsArray) > 1: */ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_extSemArray, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18913, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_cyextSemArray = ((cudaExternalSemaphore_t *)((struct 
__pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphore_t *)__pyx_t_4)->_pvt_ptr); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":18912 * for idx in range(len(extSemArray)): * cyextSemArray[idx] = (extSemArray[idx])._pvt_ptr[0] * elif len(extSemArray) == 1: # <<<<<<<<<<<<<< * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreWaitParams* cyparamsArray = NULL */ } __pyx_L8:; /* "cuda/bindings/runtime.pyx":18914 * elif len(extSemArray) == 1: * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreWaitParams* cyparamsArray = NULL # <<<<<<<<<<<<<< * if len(paramsArray) > 1: * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) */ __pyx_v_cyparamsArray = NULL; /* "cuda/bindings/runtime.pyx":18915 * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreWaitParams* cyparamsArray = NULL * if len(paramsArray) > 1: # <<<<<<<<<<<<<< * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) * if cyparamsArray is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18915, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 > 1); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18916 * cdef cyruntime.cudaExternalSemaphoreWaitParams* cyparamsArray = NULL * if len(paramsArray) > 1: * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) # <<<<<<<<<<<<<< * if cyparamsArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreWaitParams))) */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18916, __pyx_L1_error) __pyx_v_cyparamsArray = ((struct cudaExternalSemaphoreWaitParams *)calloc(__pyx_t_8, (sizeof(struct cudaExternalSemaphoreWaitParams)))); /* 
"cuda/bindings/runtime.pyx":18917 * if len(paramsArray) > 1: * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) * if cyparamsArray is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreWaitParams))) * for idx in range(len(paramsArray)): */ __pyx_t_1 = (__pyx_v_cyparamsArray == NULL); if (unlikely(__pyx_t_1)) { /* "cuda/bindings/runtime.pyx":18918 * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) * if cyparamsArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreWaitParams))) # <<<<<<<<<<<<<< * for idx in range(len(paramsArray)): * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_9 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18918, __pyx_L1_error) __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = 
__Pyx_PyLong_FromSize_t((sizeof(struct cudaExternalSemaphoreWaitParams))); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 18918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18918, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":18917 * if len(paramsArray) > 1: * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) * if cyparamsArray is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreWaitParams))) * for idx in range(len(paramsArray)): */ } /* "cuda/bindings/runtime.pyx":18919 * if cyparamsArray is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreWaitParams))) * for idx in range(len(paramsArray)): # <<<<<<<<<<<<<< * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) * elif len(paramsArray) == 1: */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 
== ((Py_ssize_t)-1))) __PYX_ERR(0, 18919, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":18920 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(paramsArray)) + 'x' + str(sizeof(cyruntime.cudaExternalSemaphoreWaitParams))) * for idx in range(len(paramsArray)): * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) # <<<<<<<<<<<<<< * elif len(paramsArray) == 1: * cyparamsArray = (paramsArray[0])._pvt_ptr */ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_paramsArray, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18920, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); (void)(memcpy((&(__pyx_v_cyparamsArray[__pyx_v_idx])), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphoreWaitParams *)__pyx_t_4)->_pvt_ptr, (sizeof(struct cudaExternalSemaphoreWaitParams)))); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } /* "cuda/bindings/runtime.pyx":18915 * cyextSemArray = (extSemArray[0])._pvt_ptr * cdef cyruntime.cudaExternalSemaphoreWaitParams* cyparamsArray = NULL * if len(paramsArray) > 1: # <<<<<<<<<<<<<< * cyparamsArray = calloc(len(paramsArray), sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) * if cyparamsArray is NULL: */ goto __pyx_L12; } /* "cuda/bindings/runtime.pyx":18921 * for idx in range(len(paramsArray)): * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) * elif len(paramsArray) == 1: # <<<<<<<<<<<<<< * cyparamsArray = (paramsArray[0])._pvt_ptr * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18921, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 == 1); if (__pyx_t_1) { /* 
"cuda/bindings/runtime.pyx":18922 * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) * elif len(paramsArray) == 1: * cyparamsArray = (paramsArray[0])._pvt_ptr # <<<<<<<<<<<<<< * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) */ __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_paramsArray, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18922, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_14 = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExternalSemaphoreWaitParams *)__pyx_t_4)->_pvt_ptr; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cyparamsArray = __pyx_t_14; /* "cuda/bindings/runtime.pyx":18921 * for idx in range(len(paramsArray)): * string.memcpy(&cyparamsArray[idx], (paramsArray[idx])._pvt_ptr, sizeof(cyruntime.cudaExternalSemaphoreWaitParams)) * elif len(paramsArray) == 1: # <<<<<<<<<<<<<< * cyparamsArray = (paramsArray[0])._pvt_ptr * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) */ } __pyx_L12:; /* "cuda/bindings/runtime.pyx":18923 * elif len(paramsArray) == 1: * cyparamsArray = (paramsArray[0])._pvt_ptr * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) # <<<<<<<<<<<<<< * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) * with nogil: */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18923, __pyx_L1_error) __pyx_t_1 = (__pyx_v_numExtSems > __pyx_t_8); if (unlikely(__pyx_t_1)) { __pyx_t_9 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_5 = 
__pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18923, __pyx_L1_error) __pyx_t_3 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_3, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_numExtSems); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_9, __pyx_t_3}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18923, 
__pyx_L1_error) } /* "cuda/bindings/runtime.pyx":18924 * cyparamsArray = (paramsArray[0])._pvt_ptr * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaWaitExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18924, __pyx_L1_error) __pyx_t_1 = (__pyx_v_numExtSems > __pyx_t_8); if (unlikely(__pyx_t_1)) { __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_3 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18924, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 18924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_From_unsigned_int(__pyx_v_numExtSems); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 18924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); 
__pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 18924, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":18925 * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaWaitExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if len(extSemArray) > 1 and cyextSemArray is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18926 * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) * with nogil: * err = cyruntime.cudaWaitExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) # <<<<<<<<<<<<<< * if len(extSemArray) > 1 and cyextSemArray is not NULL: * free(cyextSemArray) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaWaitExternalSemaphoresAsync(__pyx_v_cyextSemArray, __pyx_v_cyparamsArray, __pyx_v_numExtSems, __pyx_v_cystream); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && 
__Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18926, __pyx_L19_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":18925 * if numExtSems > len(extSemArray): raise RuntimeError("List is too small: " + str(len(extSemArray)) + " < " + str(numExtSems)) * if numExtSems > len(paramsArray): raise RuntimeError("List is too small: " + str(len(paramsArray)) + " < " + str(numExtSems)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaWaitExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if len(extSemArray) > 1 and cyextSemArray is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L20; } __pyx_L19_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L20:; } } /* "cuda/bindings/runtime.pyx":18927 * with nogil: * err = cyruntime.cudaWaitExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if len(extSemArray) > 1 and cyextSemArray is not NULL: # <<<<<<<<<<<<<< * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_extSemArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18927, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L22_bool_binop_done; } __pyx_t_2 = (__pyx_v_cyextSemArray != NULL); __pyx_t_1 = __pyx_t_2; __pyx_L22_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18928 * err = cyruntime.cudaWaitExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if len(extSemArray) > 1 and cyextSemArray is not NULL: * free(cyextSemArray) # <<<<<<<<<<<<<< * if len(paramsArray) > 1 and cyparamsArray is not NULL: * free(cyparamsArray) */ free(__pyx_v_cyextSemArray); /* "cuda/bindings/runtime.pyx":18927 * with nogil: * err = cyruntime.cudaWaitExternalSemaphoresAsync(cyextSemArray, cyparamsArray, numExtSems, cystream) * if len(extSemArray) > 1 and cyextSemArray is not NULL: # 
<<<<<<<<<<<<<< * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: */ } /* "cuda/bindings/runtime.pyx":18929 * if len(extSemArray) > 1 and cyextSemArray is not NULL: * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: # <<<<<<<<<<<<<< * free(cyparamsArray) * return (_dict_cudaError_t[err],) */ __pyx_t_8 = PyObject_Length(__pyx_v_paramsArray); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 18929, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L25_bool_binop_done; } __pyx_t_2 = (__pyx_v_cyparamsArray != NULL); __pyx_t_1 = __pyx_t_2; __pyx_L25_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18930 * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: * free(cyparamsArray) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ free(__pyx_v_cyparamsArray); /* "cuda/bindings/runtime.pyx":18929 * if len(extSemArray) > 1 and cyextSemArray is not NULL: * free(cyextSemArray) * if len(paramsArray) > 1 and cyparamsArray is not NULL: # <<<<<<<<<<<<<< * free(cyparamsArray) * return (_dict_cudaError_t[err],) */ } /* "cuda/bindings/runtime.pyx":18931 * if len(paramsArray) > 1 and cyparamsArray is not NULL: * free(cyparamsArray) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 18931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 18931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 18931, __pyx_L1_error); __pyx_t_9 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18807 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaWaitExternalSemaphoresAsync(extSemArray : Optional[tuple[cudaExternalSemaphore_t] | list[cudaExternalSemaphore_t]], paramsArray : Optional[tuple[cudaExternalSemaphoreWaitParams] | list[cudaExternalSemaphoreWaitParams]], unsigned int numExtSems, stream): * """ Waits on a set of external semaphore objects. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaWaitExternalSemaphoresAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_2generator85); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_31cudaWaitExternalSemaphoresAsync_5generator86); __Pyx_XDECREF(__pyx_v_extSemArray); __Pyx_XDECREF(__pyx_v_paramsArray); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18933 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDestroyExternalSemaphore(extSem): * """ Destroys an external semaphore. 
 NOTE(review): Cython-GENERATED C below — do not hand-edit; change
 cuda/bindings/runtime.pyx and regenerate. This section implements the
 Python-visible function cudaDestroyExternalSemaphore(extSem):
 __pyx_pw_..._161 is the METH_FASTCALL wrapper that unpacks the single
 positional/keyword argument "extSem"; __pyx_pf_..._160 is the body.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_161cudaDestroyExternalSemaphore(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_160cudaDestroyExternalSemaphore, "cudaDestroyExternalSemaphore(extSem)\n\nDestroys an external semaphore.\n\nDestroys an external semaphore object and releases any references to\nthe underlying resource. Any outstanding signals or waits must have\ncompleted before the semaphore is destroyed.\n\nParameters\n----------\nextSem : :py:obj:`~.cudaExternalSemaphore_t`\n External semaphore to be destroyed\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaImportExternalSemaphore`, :py:obj:`~.cudaSignalExternalSemaphoresAsync`, :py:obj:`~.cudaWaitExternalSemaphoresAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_161cudaDestroyExternalSemaphore = {"cudaDestroyExternalSemaphore", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_161cudaDestroyExternalSemaphore, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_160cudaDestroyExternalSemaphore}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_161cudaDestroyExternalSemaphore(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_extSem = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDestroyExternalSemaphore 
(wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_extSem,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18933, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18933, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDestroyExternalSemaphore", 0) < (0)) __PYX_ERR(0, 18933, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDestroyExternalSemaphore", 1, 1, 1, i); __PYX_ERR(0, 18933, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18933, __pyx_L3_error) } __pyx_v_extSem = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDestroyExternalSemaphore", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 18933, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDestroyExternalSemaphore", __pyx_clineno, __pyx_lineno, __pyx_filename); 
/* Implementation (__pyx_pf_..._160) below: coerces extSem (None -> 0,
   cudaExternalSemaphore_t -> int(extSem), else int(cudaExternalSemaphore_t(extSem)))
   into an unsigned long long handle, releases the GIL, calls
   cyruntime.cudaDestroyExternalSemaphore, and returns (_dict_cudaError_t[err],). */
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_160cudaDestroyExternalSemaphore(__pyx_self, __pyx_v_extSem); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_160cudaDestroyExternalSemaphore(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_extSem) { cudaExternalSemaphore_t __pyx_v_cyextSem; PyObject *__pyx_v_pextSem = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDestroyExternalSemaphore", 0); /* "cuda/bindings/runtime.pyx":18956 * """ * cdef cyruntime.cudaExternalSemaphore_t cyextSem * if extSem is None: # <<<<<<<<<<<<<< * pextSem = 0 * elif isinstance(extSem, (cudaExternalSemaphore_t,)): */ __pyx_t_1 = (__pyx_v_extSem == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18957 * cdef cyruntime.cudaExternalSemaphore_t cyextSem * if extSem is None: * pextSem = 0 # <<<<<<<<<<<<<< * elif isinstance(extSem, (cudaExternalSemaphore_t,)): * pextSem = int(extSem) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pextSem = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":18956 * """ * cdef cyruntime.cudaExternalSemaphore_t cyextSem * if extSem is None: # <<<<<<<<<<<<<< * pextSem = 0 * elif isinstance(extSem, (cudaExternalSemaphore_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18958 * if extSem is None: * pextSem = 0 * elif isinstance(extSem, (cudaExternalSemaphore_t,)): # <<<<<<<<<<<<<< * pextSem = 
int(extSem) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_extSem, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphore_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":18959 * pextSem = 0 * elif isinstance(extSem, (cudaExternalSemaphore_t,)): * pextSem = int(extSem) # <<<<<<<<<<<<<< * else: * pextSem = int(cudaExternalSemaphore_t(extSem)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_extSem); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18959, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pextSem = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":18958 * if extSem is None: * pextSem = 0 * elif isinstance(extSem, (cudaExternalSemaphore_t,)): # <<<<<<<<<<<<<< * pextSem = int(extSem) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":18961 * pextSem = int(extSem) * else: * pextSem = int(cudaExternalSemaphore_t(extSem)) # <<<<<<<<<<<<<< * cyextSem = pextSem * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphore_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExternalSemaphore_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_extSem}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18961, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18961, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pextSem = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":18962 * else: * pextSem = int(cudaExternalSemaphore_t(extSem)) * cyextSem = pextSem # <<<<<<<<<<<<<< * with nogil: * err 
= cyruntime.cudaDestroyExternalSemaphore(cyextSem) */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pextSem); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 18962, __pyx_L1_error) __pyx_v_cyextSem = ((cudaExternalSemaphore_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":18963 * pextSem = int(cudaExternalSemaphore_t(extSem)) * cyextSem = pextSem * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDestroyExternalSemaphore(cyextSem) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":18964 * cyextSem = pextSem * with nogil: * err = cyruntime.cudaDestroyExternalSemaphore(cyextSem) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDestroyExternalSemaphore(__pyx_v_cyextSem); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 18964, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":18963 * pextSem = int(cudaExternalSemaphore_t(extSem)) * cyextSem = pextSem * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDestroyExternalSemaphore(cyextSem) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":18965 * with nogil: * err = cyruntime.cudaDestroyExternalSemaphore(cyextSem) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 18965, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 18965, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18965, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18965, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 18965, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18933 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDestroyExternalSemaphore(extSem): * """ Destroys an external semaphore. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDestroyExternalSemaphore", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pextSem); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":18967 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFuncSetCacheConfig(func, cacheConfig not None : cudaFuncCache): * """ Sets the preferred cache configuration for a device function. 
 NOTE(review): Cython-GENERATED C below — do not hand-edit; change
 cuda/bindings/runtime.pyx and regenerate. This section implements the
 Python-visible function cudaFuncSetCacheConfig(func, cacheConfig):
 __pyx_pw_..._163 parses the two arguments and rejects cacheConfig=None;
 __pyx_pf_..._162 wraps func via _HelperInputVoidPtr, reads
 cacheConfig.value, drops the GIL, calls cyruntime.cudaFuncSetCacheConfig,
 and returns (_dict_cudaError_t[err],).
 NOTE(review): the embedded PyDoc string contains a stray "2" after
 cudaErrorInvalidDeviceFunction; it is runtime string content, so it must
 be fixed in the .pyx docstring source and regenerated, not patched here.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_163cudaFuncSetCacheConfig(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_162cudaFuncSetCacheConfig, "cudaFuncSetCacheConfig(func, cacheConfig: cudaFuncCache)\n\nSets the preferred cache configuration for a device function.\n\nOn devices where the L1 cache and shared memory use the same hardware\nresources, this sets through `cacheConfig` the preferred cache\nconfiguration for the function specified via `func`. This is only a\npreference. The runtime will use the requested configuration if\npossible, but it is free to choose a different configuration if\nrequired to execute `func`.\n\n`func` is a device function symbol and must be declared as a `None`\nfunction. If the specified function does not exist, then\n:py:obj:`~.cudaErrorInvalidDeviceFunction` is returned. 
For templated\nfunctions, pass the function symbol as follows:\nfunc_name\n\nThis setting does nothing on devices where the size of the L1 cache and\nshared memory are fixed.\n\nLaunching a kernel with a different preference than the most recent\npreference setting may insert a device-side synchronization point.\n\nThe supported cache configurations are:\n\n- :py:obj:`~.cudaFuncCachePreferNone`: no preference for shared memory\n or L1 (default)\n\n- :py:obj:`~.cudaFuncCachePreferShared`: prefer larger shared memory\n and smaller L1 cache\n\n- :py:obj:`~.cudaFuncCachePreferL1`: prefer larger L1 cache and smaller\n shared memory\n\n- :py:obj:`~.cudaFuncCachePreferEqual`: prefer equal size L1 cache and\n shared memory\n\nParameters\n----------\nfunc : Any\n Device function symbol\ncacheConfig : :py:obj:`~.cudaFuncCache`\n Requested cache configuration\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDeviceFunction`2\n\nSee Also\n--------\ncudaFuncSetCacheConfig (C++ API), :py:obj:`~.cudaFuncGetAttributes (C API)`, :py:obj:`~.cudaLaunchKernel (C API)`, :py:obj:`~.cuFuncSetCacheConfig`\n\nNotes\n-----\nThis API does not accept a :py:obj:`~.cudaKernel_t` casted as void*. 
If cache config modification is required for a :py:obj:`~"".cudaKernel_t` (or a global function), it can be replaced with a call to :py:obj:`~.cudaFuncSetAttributes` with the attribute :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout` to specify a more granular L1 cache and shared memory split configuration."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_163cudaFuncSetCacheConfig = {"cudaFuncSetCacheConfig", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_163cudaFuncSetCacheConfig, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_162cudaFuncSetCacheConfig}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_163cudaFuncSetCacheConfig(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_func = 0; PyObject *__pyx_v_cacheConfig = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaFuncSetCacheConfig (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_func_2,&__pyx_mstate_global->__pyx_n_u_cacheConfig,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 18967, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18967, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18967, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaFuncSetCacheConfig", 0) < (0)) __PYX_ERR(0, 18967, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaFuncSetCacheConfig", 1, 2, 2, i); __PYX_ERR(0, 18967, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 18967, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 18967, __pyx_L3_error) } __pyx_v_func = values[0]; __pyx_v_cacheConfig = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaFuncSetCacheConfig", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 18967, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaFuncSetCacheConfig", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject 
*)__pyx_v_cacheConfig) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "cacheConfig"); __PYX_ERR(0, 18968, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_162cudaFuncSetCacheConfig(__pyx_self, __pyx_v_func, __pyx_v_cacheConfig); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_162cudaFuncSetCacheConfig(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func, PyObject *__pyx_v_cacheConfig) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyfunc = NULL; void *__pyx_v_cyfunc_ptr; enum cudaFuncCache __pyx_v_cycacheConfig; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; enum cudaFuncCache __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaFuncSetCacheConfig", 0); /* "cuda/bindings/runtime.pyx":19024 * This API does not accept a :py:obj:`~.cudaKernel_t` casted as void*. If cache config modification is required for a :py:obj:`~.cudaKernel_t` (or a global function), it can be replaced with a call to :py:obj:`~.cudaFuncSetAttributes` with the attribute :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout` to specify a more granular L1 cache and shared memory split configuration. 
* """ * cyfunc = _HelperInputVoidPtr(func) # <<<<<<<<<<<<<< * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_func}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19024, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyfunc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":19025 * """ * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyfunc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19025, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyfunc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":19026 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaFuncSetCacheConfig(cyfunc_ptr, cycacheConfig) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_cacheConfig, __pyx_mstate_global->__pyx_n_u_value); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 19026, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = ((enum cudaFuncCache)__Pyx_PyLong_As_enum__cudaFuncCache(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19026, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cycacheConfig = __pyx_t_6; /* "cuda/bindings/runtime.pyx":19027 * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFuncSetCacheConfig(cyfunc_ptr, cycacheConfig) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19028 * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value * with nogil: * err = cyruntime.cudaFuncSetCacheConfig(cyfunc_ptr, cycacheConfig) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaFuncSetCacheConfig(__pyx_v_cyfunc_ptr, __pyx_v_cycacheConfig); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19028, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":19027 * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaFuncCache cycacheConfig = cacheConfig.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFuncSetCacheConfig(cyfunc_ptr, cycacheConfig) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19029 * with nogil: * err = cyruntime.cudaFuncSetCacheConfig(cyfunc_ptr, cycacheConfig) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 19029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 19029, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":18967 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFuncSetCacheConfig(func, cacheConfig not None : cudaFuncCache): * """ Sets the preferred cache configuration for a device function. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaFuncSetCacheConfig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cyfunc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19031 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFuncGetAttributes(func): * """ Find out attributes for a given function. 
*/
/* NOTE(review): Cython-GENERATED argument-parsing wrapper for cudaFuncGetAttributes(func). Accepts one positional-or-keyword argument ("func"); the parsed value is handed to the __pyx_pf_..._164 implementation below. Do not edit by hand — regenerate from cuda/bindings/runtime.pyx. */
/* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_165cudaFuncGetAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_164cudaFuncGetAttributes, "cudaFuncGetAttributes(func)\n\nFind out attributes for a given function.\n\nThis function obtains the attributes of a function specified via\n`func`. `func` is a device function symbol and must be declared as a\n`None` function. The fetched attributes are placed in `attr`. If the\nspecified function does not exist, then it is assumed to be a\n:py:obj:`~.cudaKernel_t` and used as is. For templated functions, pass\nthe function symbol as follows:\nfunc_name\n\nNote that some function attributes such as\n:py:obj:`~.maxThreadsPerBlock` may vary based on the device that is\ncurrently being used.\n\nParameters\n----------\nfunc : Any\n    Device function symbol\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDeviceFunction`2\nattr : :py:obj:`~.cudaFuncAttributes`\n    Return pointer to function's attributes\n\nSee Also\n--------\n:py:obj:`~.cudaFuncSetCacheConfig (C API)`, cudaFuncGetAttributes (C++ API), :py:obj:`~.cudaLaunchKernel (C API)`, :py:obj:`~.cuFuncGetAttribute`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_165cudaFuncGetAttributes = {"cudaFuncGetAttributes", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_165cudaFuncGetAttributes, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_164cudaFuncGetAttributes}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_165cudaFuncGetAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_func = 
0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaFuncGetAttributes (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_func_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19031, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19031, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaFuncGetAttributes", 0) < (0)) __PYX_ERR(0, 19031, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaFuncGetAttributes", 1, 1, 1, i); __PYX_ERR(0, 19031, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19031, __pyx_L3_error) } __pyx_v_func = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaFuncGetAttributes", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19031, __pyx_L3_error) __pyx_L6_skip:; goto 
__pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaFuncGetAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_164cudaFuncGetAttributes(__pyx_self, __pyx_v_func); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Generated implementation of cudaFuncGetAttributes(func): allocates a cudaFuncAttributes wrapper object, converts `func` to void* via _HelperInputVoidPtr(.cptr), calls the cyruntime binding with the GIL released, and returns (_dict_cudaError_t[err], None) on failure or (_dict_cudaError_t[err], attr) on success. Do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_164cudaFuncGetAttributes(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaFuncAttributes *__pyx_v_attr = 0; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyfunc = NULL; void *__pyx_v_cyfunc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaFuncGetAttributes", 0); /* "cuda/bindings/runtime.pyx":19063 * :py:obj:`~.cudaFuncSetCacheConfig (C API)`, cudaFuncGetAttributes (C++ API), :py:obj:`~.cudaLaunchKernel (C API)`, :py:obj:`~.cuFuncGetAttribute` * """ * cdef cudaFuncAttributes attr = cudaFuncAttributes() # <<<<<<<<<<<<<< * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaFuncAttributes); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaFuncAttributes); 
__pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19063, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_attr = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaFuncAttributes *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":19064 * """ * cdef cudaFuncAttributes attr = cudaFuncAttributes() * cyfunc = _HelperInputVoidPtr(func) # <<<<<<<<<<<<<< * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_func}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19064, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyfunc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":19065 * cdef cudaFuncAttributes attr = cudaFuncAttributes() * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaFuncGetAttributes(attr._pvt_ptr, cyfunc_ptr) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyfunc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19065, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned 
PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19065, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyfunc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":19066 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFuncGetAttributes(attr._pvt_ptr, cyfunc_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19067 * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: * err = cyruntime.cudaFuncGetAttributes(attr._pvt_ptr, cyfunc_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaFuncGetAttributes(((struct cudaFuncAttributes *)__pyx_v_attr->_pvt_ptr), __pyx_v_cyfunc_ptr); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19067, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":19066 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFuncGetAttributes(attr._pvt_ptr, cyfunc_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19068 * with nogil: * err = cyruntime.cudaFuncGetAttributes(attr._pvt_ptr, cyfunc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], attr) */ __pyx_t_7 = (__pyx_v_err != cudaSuccess); if (__pyx_t_7) { /* "cuda/bindings/runtime.pyx":19069 * err = cyruntime.cudaFuncGetAttributes(attr._pvt_ptr, cyfunc_ptr) * if err != cyruntime.cudaSuccess: * 
return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], attr) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 19069, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None) != (0)) __PYX_ERR(0, 19069, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19068 * with nogil: * err = cyruntime.cudaFuncGetAttributes(attr._pvt_ptr, cyfunc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], attr) */ } /* "cuda/bindings/runtime.pyx":19070 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], attr) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19070, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19070, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(0, 19070, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19070, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 19070, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_attr); __Pyx_GIVEREF((PyObject *)__pyx_v_attr); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_attr)) != (0)) __PYX_ERR(0, 19070, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19031 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFuncGetAttributes(func): * """ Find out attributes for a given function. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaFuncGetAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_attr); __Pyx_XDECREF((PyObject *)__pyx_v_cyfunc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19072 * return (_dict_cudaError_t[err], attr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFuncSetAttribute(func, attr not None : cudaFuncAttribute, int value): * """ Set attributes for a given function. 
*/
/* NOTE(review): Cython-GENERATED wrapper + implementation for cudaFuncSetAttribute(func, attr, value), followed by the nogil host-callback trampoline cudaStreamRtHostCallbackWrapper. Do not edit by hand — regenerate from cuda/bindings/runtime.pyx. */
/* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_167cudaFuncSetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_166cudaFuncSetAttribute, "cudaFuncSetAttribute(func, attr: cudaFuncAttribute, int value)\n\nSet attributes for a given function.\n\nThis function sets the attributes of a function specified via `func`.\nThe parameter `func` must be a pointer to a function that executes on\nthe device. The parameter specified by `func` must be declared as a\n`None` function. The enumeration defined by `attr` is set to the value\ndefined by `value`. If the specified function does not exist, then it\nis assumed to be a :py:obj:`~.cudaKernel_t` and used as is. If the\nspecified attribute cannot be written, or if the value is incorrect,\nthen :py:obj:`~.cudaErrorInvalidValue` is returned.\n\nValid values for `attr` are:\n\n- :py:obj:`~.cudaFuncAttributeMaxDynamicSharedMemorySize` - The\n  requested maximum size in bytes of dynamically-allocated shared\n  memory. The sum of this value and the function attribute\n  :py:obj:`~.sharedSizeBytes` cannot exceed the device attribute\n  :py:obj:`~.cudaDevAttrMaxSharedMemoryPerBlockOptin`. The maximal size\n  of requestable dynamic shared memory may differ by GPU architecture.\n\n- :py:obj:`~.cudaFuncAttributePreferredSharedMemoryCarveout` - On\n  devices where the L1 cache and shared memory use the same hardware\n  resources, this sets the shared memory carveout preference, in\n  percent of the total shared memory. See\n  :py:obj:`~.cudaDevAttrMaxSharedMemoryPerMultiprocessor`. This is only\n  a hint, and the driver can choose a different ratio if required to\n  execute the function.\n\n- :py:obj:`~.cudaFuncAttributeRequiredClusterWidth`: The required\n  cluster width in blocks. 
The width, height, and depth values must\n  either all be 0 or all be positive. The validity of the cluster\n  dimensions is checked at launch time. If the value is set during\n  compile time, it cannot be set at runtime. Setting it at runtime will\n  return cudaErrorNotPermitted.\n\n- :py:obj:`~.cudaFuncAttributeRequiredClusterHeight`: The required\n  cluster height in blocks. The width, height, ""and depth values must\n  either all be 0 or all be positive. The validity of the cluster\n  dimensions is checked at launch time. If the value is set during\n  compile time, it cannot be set at runtime. Setting it at runtime will\n  return cudaErrorNotPermitted.\n\n- :py:obj:`~.cudaFuncAttributeRequiredClusterDepth`: The required\n  cluster depth in blocks. The width, height, and depth values must\n  either all be 0 or all be positive. The validity of the cluster\n  dimensions is checked at launch time. If the value is set during\n  compile time, it cannot be set at runtime. Setting it at runtime will\n  return cudaErrorNotPermitted.\n\n- :py:obj:`~.cudaFuncAttributeNonPortableClusterSizeAllowed`: Indicates\n  whether the function can be launched with non-portable cluster size.\n  1 is allowed, 0 is disallowed.\n\n- :py:obj:`~.cudaFuncAttributeClusterSchedulingPolicyPreference`: The\n  block scheduling policy of a function. 
The value type is\n  cudaClusterSchedulingPolicy.\n\ncudaLaunchKernel (C++ API), cudaFuncSetCacheConfig (C++ API),\n:py:obj:`~.cudaFuncGetAttributes (C API)`,\n\nParameters\n----------\nfunc : Any\n    Function to get attributes of\nattr : :py:obj:`~.cudaFuncAttribute`\n    Attribute to set\nvalue : int\n    Value to set\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDeviceFunction`, :py:obj:`~.cudaErrorInvalidValue`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_167cudaFuncSetAttribute = {"cudaFuncSetAttribute", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_167cudaFuncSetAttribute, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_166cudaFuncSetAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_167cudaFuncSetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_func = 0; PyObject *__pyx_v_attr = 0; int __pyx_v_value; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaFuncSetAttribute (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_func_2,&__pyx_mstate_global->__pyx_n_u_attr,&__pyx_mstate_global->__pyx_n_u_value,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19072, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19072, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19072, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19072, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaFuncSetAttribute", 0) < (0)) __PYX_ERR(0, 19072, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaFuncSetAttribute", 1, 3, 3, i); __PYX_ERR(0, 19072, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19072, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19072, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19072, __pyx_L3_error) } __pyx_v_func = values[0]; __pyx_v_attr = values[1]; __pyx_v_value = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_value == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19073, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaFuncSetAttribute", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 19072, __pyx_L3_error) 
__pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaFuncSetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_attr) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attr"); __PYX_ERR(0, 19073, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_166cudaFuncSetAttribute(__pyx_self, __pyx_v_func, __pyx_v_attr, __pyx_v_value); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Generated implementation of cudaFuncSetAttribute(func, attr, value): converts func via _HelperInputVoidPtr(.cptr) to void*, reads attr.value as enum cudaFuncAttribute, invokes the cyruntime binding with the GIL released, and returns (_dict_cudaError_t[err],). Do not hand-edit. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_166cudaFuncSetAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func, PyObject *__pyx_v_attr, int __pyx_v_value) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyfunc = NULL; void *__pyx_v_cyfunc_ptr; enum cudaFuncAttribute __pyx_v_cyattr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; enum cudaFuncAttribute __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaFuncSetAttribute", 0); /* "cuda/bindings/runtime.pyx":19148 * :py:obj:`~.cudaSuccess`, 
:py:obj:`~.cudaErrorInvalidDeviceFunction`, :py:obj:`~.cudaErrorInvalidValue` * """ * cyfunc = _HelperInputVoidPtr(func) # <<<<<<<<<<<<<< * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaFuncAttribute cyattr = attr.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_func}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19148, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyfunc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":19149 * """ * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaFuncAttribute cyattr = attr.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyfunc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19149, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyfunc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":19150 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaFuncAttribute cyattr = attr.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaFuncSetAttribute(cyfunc_ptr, cyattr, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_attr, 
__pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = ((enum cudaFuncAttribute)__Pyx_PyLong_As_enum__cudaFuncAttribute(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19150, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyattr = __pyx_t_6; /* "cuda/bindings/runtime.pyx":19151 * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaFuncAttribute cyattr = attr.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFuncSetAttribute(cyfunc_ptr, cyattr, value) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19152 * cdef cyruntime.cudaFuncAttribute cyattr = attr.value * with nogil: * err = cyruntime.cudaFuncSetAttribute(cyfunc_ptr, cyattr, value) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaFuncSetAttribute(__pyx_v_cyfunc_ptr, __pyx_v_cyattr, __pyx_v_value); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19152, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":19151 * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaFuncAttribute cyattr = attr.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFuncSetAttribute(cyfunc_ptr, cyattr, value) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19153 * with nogil: * err = cyruntime.cudaFuncSetAttribute(cyfunc_ptr, cyattr, value) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * ctypedef struct cudaStreamHostCallbackData_st: */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, 
__pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 19153, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19072 * return (_dict_cudaError_t[err], attr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFuncSetAttribute(func, attr not None : cudaFuncAttribute, int value): * """ Set attributes for a given function. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaFuncSetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cyfunc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19161 * ctypedef cudaStreamHostCallbackData_st cudaStreamHostCallbackData * * @cython.show_performance_hints(False) # <<<<<<<<<<<<<< * cdef void cudaStreamRtHostCallbackWrapper(void *data) nogil: * cdef cudaStreamHostCallbackData *cbData = data */
/* NOTE(review): Trampoline invoked by the CUDA runtime on a driver thread (no GIL held). Acquires the GIL via PyGILState_Ensure to invoke the user's Python-level callback, then frees the heap-allocated callback-data struct (allocated by the enqueuing API elsewhere in this file). Generated code — do not hand-edit. */
static void __pyx_f_4cuda_8bindings_7runtime_cudaStreamRtHostCallbackWrapper(void *__pyx_v_data) { __pyx_t_4cuda_8bindings_7runtime_cudaStreamHostCallbackData *__pyx_v_cbData; /* "cuda/bindings/runtime.pyx":19163 * @cython.show_performance_hints(False) * cdef void cudaStreamRtHostCallbackWrapper(void *data) nogil: * cdef cudaStreamHostCallbackData *cbData = data # <<<<<<<<<<<<<< * with gil: * cbData.callback(cbData.userData) */ __pyx_v_cbData = ((__pyx_t_4cuda_8bindings_7runtime_cudaStreamHostCallbackData *)__pyx_v_data); /* "cuda/bindings/runtime.pyx":19164 * cdef void cudaStreamRtHostCallbackWrapper(void *data) nogil: * cdef cudaStreamHostCallbackData *cbData = data * with gil: # <<<<<<<<<<<<<< * cbData.callback(cbData.userData) * free(cbData) */ { PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19165 * cdef cudaStreamHostCallbackData *cbData = data * with gil: * cbData.callback(cbData.userData) # <<<<<<<<<<<<<< * free(cbData) * */ __pyx_v_cbData->callback(__pyx_v_cbData->userData); } /* "cuda/bindings/runtime.pyx":19164 * cdef void cudaStreamRtHostCallbackWrapper(void *data) nogil: * cdef cudaStreamHostCallbackData *cbData = data * with gil: # <<<<<<<<<<<<<< * cbData.callback(cbData.userData) * free(cbData) */ /*finally:*/ { /*normal exit:*/{ 
__Pyx_PyGILState_Release(__pyx_gilstate_save); goto __pyx_L5; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19166 * with gil: * cbData.callback(cbData.userData) * free(cbData) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ free(__pyx_v_cbData); /* "cuda/bindings/runtime.pyx":19161 * ctypedef cudaStreamHostCallbackData_st cudaStreamHostCallbackData * * @cython.show_performance_hints(False) # <<<<<<<<<<<<<< * cdef void cudaStreamRtHostCallbackWrapper(void *data) nogil: * cdef cudaStreamHostCallbackData *cbData = data */ /* function exit code */ } /* "cuda/bindings/runtime.pyx":19168 * free(cbData) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaLaunchHostFunc(stream, fn, userData): * """ Enqueues a host function call in a stream. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_169cudaLaunchHostFunc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_168cudaLaunchHostFunc, "cudaLaunchHostFunc(stream, fn, userData)\n\nEnqueues a host function call in a stream.\n\nEnqueues a host function to run in a stream. The function will be\ncalled after currently enqueued work and will block work added after\nit.\n\nThe host function must not make any CUDA API calls. Attempting to use a\nCUDA API may result in :py:obj:`~.cudaErrorNotPermitted`, but this is\nnot required. The host function must not perform any synchronization\nthat may depend on outstanding CUDA work not mandated to run earlier.\nHost functions without a mandated order (such as in independent\nstreams) execute in undefined order and may be serialized.\n\nFor the purposes of Unified Memory, execution makes a number of\nguarantees:\n\n- The stream is considered idle for the duration of the function's\n  execution. 
Thus, for example, the function may always use memory\n attached to the stream it was enqueued in.\n\n- The start of execution of the function has the same effect as\n synchronizing an event recorded in the same stream immediately prior\n to the function. It thus synchronizes streams which have been\n \"joined\" prior to the function.\n\n- Adding device work to any stream does not have the effect of making\n the stream active until all preceding host functions and stream\n callbacks have executed. Thus, for example, a function might use\n global attached memory even if work has been added to another stream,\n if the work has been ordered behind the function call with an event.\n\n- Completion of the function does not cause a stream to become active\n except as described above. The stream will remain idle if no device\n work follows the function, and will remain idle across consecutive\n host functions or stream callbacks without device work in between.\n Thus, for example, stream synchronization can be done by signaling\n from a host function at the end of the stream.\n\nNote that, in constrast to :py:obj:`~.cuStreamAddCallback`, the\nfunction will not be called"" in the event of an error in the CUDA\ncontext.\n\nParameters\n----------\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream to enqueue function call in\nfn : :py:obj:`~.cudaHostFn_t`\n The function to call once preceding stream operations are complete\nuserData : Any\n User-specified data to be passed to the function\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorNotSupported`\n\nSee Also\n--------\n:py:obj:`~.cudaStreamCreate`, :py:obj:`~.cudaStreamQuery`, :py:obj:`~.cudaStreamSynchronize`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaStreamDestroy`, :py:obj:`~.cudaMallocManaged`, :py:obj:`~.cudaStreamAttachMemAsync`, :py:obj:`~.cudaStreamAddCallback`, 
:py:obj:`~.cuLaunchHostFunc`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_169cudaLaunchHostFunc = {"cudaLaunchHostFunc", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_169cudaLaunchHostFunc, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_168cudaLaunchHostFunc}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_169cudaLaunchHostFunc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_stream = 0; PyObject *__pyx_v_fn = 0; PyObject *__pyx_v_userData = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaLaunchHostFunc (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_stream,&__pyx_mstate_global->__pyx_n_u_fn_2,&__pyx_mstate_global->__pyx_n_u_userData_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19168, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19168, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19168, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19168, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaLaunchHostFunc", 0) < (0)) __PYX_ERR(0, 19168, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaLaunchHostFunc", 1, 3, 3, i); __PYX_ERR(0, 19168, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19168, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19168, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19168, __pyx_L3_error) } __pyx_v_stream = values[0]; __pyx_v_fn = values[1]; __pyx_v_userData = values[2]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaLaunchHostFunc", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 19168, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchHostFunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_168cudaLaunchHostFunc(__pyx_self, __pyx_v_stream, __pyx_v_fn, __pyx_v_userData); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_168cudaLaunchHostFunc(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_fn, PyObject *__pyx_v_userData) { cudaHostFn_t __pyx_v_cyfn; PyObject *__pyx_v_pfn = NULL; cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyuserData = NULL; void *__pyx_v_cyuserData_ptr; __pyx_t_4cuda_8bindings_7runtime_cudaStreamHostCallbackData *__pyx_v_cbData; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; int __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaLaunchHostFunc", 0); /* "cuda/bindings/runtime.pyx":19231 * """ * cdef cyruntime.cudaHostFn_t cyfn * if fn is None: # <<<<<<<<<<<<<< * pfn = 0 * elif isinstance(fn, (cudaHostFn_t,)): */ __pyx_t_1 = (__pyx_v_fn == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19232 * cdef cyruntime.cudaHostFn_t cyfn * if fn is None: * pfn = 0 # <<<<<<<<<<<<<< * elif isinstance(fn, (cudaHostFn_t,)): * pfn = int(fn) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); 
/* NOTE(review): generated body of cudaLaunchHostFunc. fn and stream are first
   normalised to Python ints (pfn/pstream) and then cast to the C handle types
   cudaHostFn_t / cudaStream_t via unsigned-long-long conversion below. */
__pyx_v_pfn = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":19231 * """ * cdef cyruntime.cudaHostFn_t cyfn * if fn is None: # <<<<<<<<<<<<<< * pfn = 0 * elif isinstance(fn, (cudaHostFn_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":19233 * if fn is None: * pfn = 0 * elif isinstance(fn, (cudaHostFn_t,)): # <<<<<<<<<<<<<< * pfn = int(fn) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_fn, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaHostFn_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19234 * pfn = 0 * elif isinstance(fn, (cudaHostFn_t,)): * pfn = int(fn) # <<<<<<<<<<<<<< * else: * pfn = int(cudaHostFn_t(fn)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_fn); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pfn = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":19233 * if fn is None: * pfn = 0 * elif isinstance(fn, (cudaHostFn_t,)): # <<<<<<<<<<<<<< * pfn = int(fn) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":19236 * pfn = int(fn) * else: * pfn = int(cudaHostFn_t(fn)) # <<<<<<<<<<<<<< * cyfn = pfn * cdef cyruntime.cudaStream_t cystream */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaHostFn_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaHostFn_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_fn}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19236, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19236, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pfn = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":19237 * else: * pfn = int(cudaHostFn_t(fn)) * cyfn = pfn # <<<<<<<<<<<<<< * cdef cyruntime.cudaStream_t cystream * if stream is None: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pfn); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19237, __pyx_L1_error) __pyx_v_cyfn = ((cudaHostFn_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":19239 * cyfn = pfn * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19240 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":19239 * cyfn = pfn * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":19241 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_7 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_7) { } else { __pyx_t_1 = __pyx_t_7; goto __pyx_L5_bool_binop_done; } __pyx_t_7 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_7; __pyx_L5_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19242 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_4 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19242, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_pstream = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":19241 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":19244 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cyuserData = _HelperInputVoidPtr(userData) */ /*else*/ { __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_stream}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19244, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_t_3 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF((PyObject *)__pyx_t_4); __pyx_t_4 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":19245 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cyuserData = _HelperInputVoidPtr(userData) * cdef void* cyuserData_ptr = cyuserData.cptr */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19245, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":19246 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cyuserData = _HelperInputVoidPtr(userData) # <<<<<<<<<<<<<< * cdef void* cyuserData_ptr = cyuserData.cptr * */ __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_userData}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19246, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_v_cyuserData = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":19247 * cystream = pstream * cyuserData = _HelperInputVoidPtr(userData) * cdef void* cyuserData_ptr = cyuserData.cptr # <<<<<<<<<<<<<< * * cdef cudaStreamHostCallbackData *cbData = NULL */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyuserData), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_3); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19247, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_cyuserData_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":19249 * cdef void* cyuserData_ptr = cyuserData.cptr * * cdef cudaStreamHostCallbackData *cbData = NULL # <<<<<<<<<<<<<< * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: */ __pyx_v_cbData = NULL; /* "cuda/bindings/runtime.pyx":19250 * * cdef cudaStreamHostCallbackData *cbData = NULL * cbData = malloc(sizeof(cbData[0])) # <<<<<<<<<<<<<< * if cbData == NULL: * return (cudaError_t.cudaErrorMemoryAllocation,) */
/* cbData ownership: freed by cudaStreamRtHostCallbackWrapper after the callback
   runs on success, or freed below if the launch itself fails. */
__pyx_v_cbData = ((__pyx_t_4cuda_8bindings_7runtime_cudaStreamHostCallbackData *)malloc((sizeof((__pyx_v_cbData[0]))))); /* "cuda/bindings/runtime.pyx":19251 * cdef cudaStreamHostCallbackData *cbData = NULL * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: # <<<<<<<<<<<<<< * return (cudaError_t.cudaErrorMemoryAllocation,) * cbData.callback = cyfn */ __pyx_t_1 = (__pyx_v_cbData == NULL); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19252 * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: * return (cudaError_t.cudaErrorMemoryAllocation,) # <<<<<<<<<<<<<< * cbData.callback = cyfn * cbData.userData = cyuserData_ptr */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_cudaErrorMemoryAllocation); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 19252, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19251 * cdef cudaStreamHostCallbackData *cbData = NULL * cbData = malloc(sizeof(cbData[0])) * if cbData == NULL: # <<<<<<<<<<<<<< * return (cudaError_t.cudaErrorMemoryAllocation,) * cbData.callback = cyfn */ } /* "cuda/bindings/runtime.pyx":19253 * if cbData == NULL: * return (cudaError_t.cudaErrorMemoryAllocation,) * cbData.callback = cyfn # <<<<<<<<<<<<<< * cbData.userData = cyuserData_ptr * */ __pyx_v_cbData->callback = __pyx_v_cyfn; /* "cuda/bindings/runtime.pyx":19254 * return (cudaError_t.cudaErrorMemoryAllocation,) * cbData.callback = cyfn * cbData.userData = cyuserData_ptr # <<<<<<<<<<<<<< * * with nogil: */ __pyx_v_cbData->userData = __pyx_v_cyuserData_ptr; /* "cuda/bindings/runtime.pyx":19256 * cbData.userData = cyuserData_ptr * * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaLaunchHostFunc(cystream, cudaStreamRtHostCallbackWrapper, cbData) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19257 * * with nogil: * err = cyruntime.cudaLaunchHostFunc(cystream, cudaStreamRtHostCallbackWrapper, cbData) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * free(cbData) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaLaunchHostFunc(__pyx_v_cystream, ((cudaHostFn_t)__pyx_f_4cuda_8bindings_7runtime_cudaStreamRtHostCallbackWrapper), ((void *)__pyx_v_cbData)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19257, __pyx_L9_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":19256 * cbData.userData = cyuserData_ptr * * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaLaunchHostFunc(cystream, cudaStreamRtHostCallbackWrapper, cbData) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L10; } __pyx_L9_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L10:; } } /* "cuda/bindings/runtime.pyx":19258 * with nogil: * err = cyruntime.cudaLaunchHostFunc(cystream, cudaStreamRtHostCallbackWrapper, cbData) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * free(cbData) * return (_dict_cudaError_t[err],) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19259 * err = cyruntime.cudaLaunchHostFunc(cystream, cudaStreamRtHostCallbackWrapper, cbData) * if err != cyruntime.cudaSuccess: * free(cbData) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ free(__pyx_v_cbData); /* "cuda/bindings/runtime.pyx":19258 * with nogil: * err = cyruntime.cudaLaunchHostFunc(cystream, cudaStreamRtHostCallbackWrapper, cbData) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * free(cbData) * return (_dict_cudaError_t[err],) */ } /* "cuda/bindings/runtime.pyx":19260 * if err != cyruntime.cudaSuccess: * free(cbData) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 19260, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19168 * free(cbData) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaLaunchHostFunc(stream, fn, userData): * """ Enqueues a host function call in a stream. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaLaunchHostFunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pfn); __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_cyuserData); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19262 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFuncSetSharedMemConfig(func, config not None : cudaSharedMemConfig): * """ Sets the shared memory configuration for a device function. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_171cudaFuncSetSharedMemConfig(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_170cudaFuncSetSharedMemConfig, "cudaFuncSetSharedMemConfig(func, config: cudaSharedMemConfig)\n\nSets the shared memory configuration for a device function.\n\n[Deprecated]\n\nOn devices with configurable shared memory banks, this function will\nforce all subsequent launches of the specified device function to have\nthe given shared memory bank size configuration. On any given launch of\nthe function, the shared memory configuration of the device will be\ntemporarily changed if needed to suit the function's preferred\nconfiguration. 
Changes in shared memory configuration between\nsubsequent launches of functions, may introduce a device side\nsynchronization point.\n\nAny per-function setting of shared memory bank size set via\n:py:obj:`~.cudaFuncSetSharedMemConfig` will override the device wide\nsetting set by :py:obj:`~.cudaDeviceSetSharedMemConfig`.\n\nChanging the shared memory bank size will not increase shared memory\nusage or affect occupancy of kernels, but may have major effects on\nperformance. Larger bank sizes will allow for greater potential\nbandwidth to shared memory, but will change what kinds of accesses to\nshared memory will result in bank conflicts.\n\nThis function will do nothing on devices with fixed shared memory bank\nsize.\n\nFor templated functions, pass the function symbol as follows:\nfunc_name\n\nThe supported bank configurations are:\n\n- :py:obj:`~.cudaSharedMemBankSizeDefault`: use the device's shared\n memory configuration when launching this function.\n\n- :py:obj:`~.cudaSharedMemBankSizeFourByte`: set shared memory bank\n width to be four bytes natively when launching this function.\n\n- :py:obj:`~.cudaSharedMemBankSizeEightByte`: set shared memory bank\n width to be eight bytes natively when launching this function.\n\nParameters\n----------\nfunc : Any\n Device function symbol\nconfig : :py:obj:`~.cudaSharedMemConfig`\n Requested shared memory configuration\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`"", :py:obj:`~.cudaErrorInvalidDeviceFunction`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceSetSharedMemConfig`, :py:obj:`~.cudaDeviceGetSharedMemConfig`, :py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaFuncSetCacheConfig`, :py:obj:`~.cuFuncSetSharedMemConfig`");
/* FIX(review): removed a stray ",2" that followed cudaErrorInvalidValue in the
   generated docstring above (compare the clean Returns list in
   cudaLaunchHostFunc's docstring). */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_171cudaFuncSetSharedMemConfig = {"cudaFuncSetSharedMemConfig", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_171cudaFuncSetSharedMemConfig, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_170cudaFuncSetSharedMemConfig};
/* Python wrapper for cudaFuncSetSharedMemConfig(func, config): unpacks the two
   required arguments for both calling conventions, rejects config=None, then
   forwards to __pyx_pf_..._170cudaFuncSetSharedMemConfig. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_171cudaFuncSetSharedMemConfig(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_func = 0; PyObject *__pyx_v_config = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaFuncSetSharedMemConfig (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_func_2,&__pyx_mstate_global->__pyx_n_u_config,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
/* FIX(review): was `if (unlikely(__pyx_kwds_len) < 0)`. On GCC/Clang unlikely(x)
   expands to __builtin_expect(!!(x), 0), collapsing x to 0/1 before `< 0`, so
   a negative (error) return from __Pyx_NumKwargs_FASTCALL could never be
   detected. The comparison now sits inside unlikely(). */
if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 19262, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19262, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19262, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaFuncSetSharedMemConfig", 0) < (0)) __PYX_ERR(0, 19262, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaFuncSetSharedMemConfig", 1, 2, 2, i); __PYX_ERR(0, 19262, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19262, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19262, __pyx_L3_error) } __pyx_v_func = values[0]; __pyx_v_config = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaFuncSetSharedMemConfig", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 19262, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaFuncSetSharedMemConfig", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_config) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "config"); __PYX_ERR(0, 19263, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_170cudaFuncSetSharedMemConfig(__pyx_self, __pyx_v_func, __pyx_v_config); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_170cudaFuncSetSharedMemConfig(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func, PyObject *__pyx_v_config) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyfunc = NULL; void *__pyx_v_cyfunc_ptr; enum cudaSharedMemConfig __pyx_v_cyconfig; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; enum cudaSharedMemConfig __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaFuncSetSharedMemConfig", 0); /* "cuda/bindings/runtime.pyx":19320 * :py:obj:`~.cudaDeviceSetSharedMemConfig`, :py:obj:`~.cudaDeviceGetSharedMemConfig`, :py:obj:`~.cudaDeviceSetCacheConfig`, :py:obj:`~.cudaDeviceGetCacheConfig`, :py:obj:`~.cudaFuncSetCacheConfig`, :py:obj:`~.cuFuncSetSharedMemConfig` * """ * cyfunc = _HelperInputVoidPtr(func) # <<<<<<<<<<<<<< * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject 
/* NOTE(review): body of cudaFuncSetSharedMemConfig (a [Deprecated] CUDA API per
   its docstring): wraps func in _HelperInputVoidPtr, reads config.value as the
   enum, then calls the cyruntime binding with the GIL released. */
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_func}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19320, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyfunc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":19321 * """ * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyfunc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19321, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19321, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyfunc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":19322 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaFuncSetSharedMemConfig(cyfunc_ptr, cyconfig) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_config, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = ((enum cudaSharedMemConfig)__Pyx_PyLong_As_enum__cudaSharedMemConfig(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 19322, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyconfig = __pyx_t_6; /* "cuda/bindings/runtime.pyx":19323 * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFuncSetSharedMemConfig(cyfunc_ptr, cyconfig) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19324 * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value * with nogil: * err = cyruntime.cudaFuncSetSharedMemConfig(cyfunc_ptr, cyconfig) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaFuncSetSharedMemConfig(__pyx_v_cyfunc_ptr, __pyx_v_cyconfig); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19324, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":19323 * cdef void* cyfunc_ptr = cyfunc.cptr * cdef cyruntime.cudaSharedMemConfig cyconfig = config.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFuncSetSharedMemConfig(cyfunc_ptr, cyconfig) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19325 * with nogil: * err = cyruntime.cudaFuncSetSharedMemConfig(cyfunc_ptr, cyconfig) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 19325, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19262 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFuncSetSharedMemConfig(func, config not None : cudaSharedMemConfig): * """ Sets the shared memory configuration for a device function. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaFuncSetSharedMemConfig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cyfunc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19327 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaOccupancyMaxActiveBlocksPerMultiprocessor(func, int blockSize, size_t dynamicSMemSize): * """ Returns occupancy for a device function. 
*/
/* NOTE(review): This file is machine-generated by Cython from
 * cuda/bindings/runtime.pyx. Do not hand-edit; change the .pyx source and
 * regenerate. Comments added here are review annotations only. */
/* Python wrapper */
/* CPython entry point for cudaOccupancyMaxActiveBlocksPerMultiprocessor:
 * unpacks (func, blockSize, dynamicSMemSize) from positional/keyword
 * arguments, converts the numeric arguments to C types, and delegates to
 * the implementation function (__pyx_pf_..._172...) defined below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_173cudaOccupancyMaxActiveBlocksPerMultiprocessor(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_172cudaOccupancyMaxActiveBlocksPerMultiprocessor, "cudaOccupancyMaxActiveBlocksPerMultiprocessor(func, int blockSize, size_t dynamicSMemSize)\n\nReturns occupancy for a device function.\n\nReturns in `*numBlocks` the maximum number of active blocks per\nstreaming multiprocessor for the device function.\n\nParameters\n----------\nfunc : Any\n Kernel function for which occupancy is calculated\nblockSize : int\n Block size the kernel is intended to be launched with\ndynamicSMemSize : size_t\n Per-block dynamic shared memory usage intended, in bytes\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidDeviceFunction`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorUnknown`,\nnumBlocks : int\n Returned occupancy\n\nSee Also\n--------\n:py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags`, cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API), cudaOccupancyAvailableDynamicSMemPerBlock (C++ API), :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessor`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_173cudaOccupancyMaxActiveBlocksPerMultiprocessor = {"cudaOccupancyMaxActiveBlocksPerMultiprocessor", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_173cudaOccupancyMaxActiveBlocksPerMultiprocessor, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_172cudaOccupancyMaxActiveBlocksPerMultiprocessor};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_173cudaOccupancyMaxActiveBlocksPerMultiprocessor(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_func = 0; int __pyx_v_blockSize; size_t __pyx_v_dynamicSMemSize;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaOccupancyMaxActiveBlocksPerMultiprocessor (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* NOTE(review): argument unpacking — fills values[0..2] from positional
 * and/or keyword arguments, then converts blockSize (int) and
 * dynamicSMemSize (size_t); any failure jumps to __pyx_L3_error, which
 * releases the collected references. */
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_func_2,&__pyx_mstate_global->__pyx_n_u_blockSize,&__pyx_mstate_global->__pyx_n_u_dynamicSMemSize,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19327, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19327, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19327, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19327, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaOccupancyMaxActiveBlocksPerMultiprocessor", 0) < (0)) __PYX_ERR(0, 19327, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaOccupancyMaxActiveBlocksPerMultiprocessor", 1, 3, 3, i); __PYX_ERR(0, 19327, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19327, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19327, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19327, __pyx_L3_error) } __pyx_v_func = values[0]; __pyx_v_blockSize = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_blockSize == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19328, __pyx_L3_error) __pyx_v_dynamicSMemSize = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_dynamicSMemSize == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19328, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaOccupancyMaxActiveBlocksPerMultiprocessor", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 19327, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessor", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_172cudaOccupancyMaxActiveBlocksPerMultiprocessor(__pyx_self, __pyx_v_func, __pyx_v_blockSize, __pyx_v_dynamicSMemSize); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): implementation function — wraps `func` in _HelperInputVoidPtr
 * to obtain a void*, then queries the runtime occupancy API with the GIL
 * released (body continues on the following lines). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_172cudaOccupancyMaxActiveBlocksPerMultiprocessor(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func, int __pyx_v_blockSize, size_t __pyx_v_dynamicSMemSize) { int __pyx_v_numBlocks; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyfunc = NULL; void *__pyx_v_cyfunc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaOccupancyMaxActiveBlocksPerMultiprocessor", 0); /* "cuda/bindings/runtime.pyx":19354 * :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags`, cudaOccupancyMaxPotentialBlockSize (C++ API), 
cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API), cudaOccupancyAvailableDynamicSMemPerBlock (C++ API), :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessor` * """ * cdef int numBlocks = 0 # <<<<<<<<<<<<<< * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr */
/* NOTE(review): Cython-generated — do not hand-edit; regenerate from
 * cuda/bindings/runtime.pyx. Out-parameter numBlocks is zero-initialized
 * before the runtime call below. */
__pyx_v_numBlocks = 0; /* "cuda/bindings/runtime.pyx":19355 * """ * cdef int numBlocks = 0 * cyfunc = _HelperInputVoidPtr(func) # <<<<<<<<<<<<<< * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_func}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19355, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyfunc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":19356 * cdef int numBlocks = 0 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize) */
/* NOTE(review): reads cyfunc.cptr as a Python integer and reinterprets it as
 * the void* device-function handle passed to the runtime call. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyfunc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19356, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19356, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyfunc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5));
/* NOTE(review): `with nogil:` region — GIL released around the runtime
 * occupancy query, re-acquired in the finally arms below. */
/* "cuda/bindings/runtime.pyx":19357 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19358 * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: * err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaOccupancyMaxActiveBlocksPerMultiprocessor((&__pyx_v_numBlocks), __pyx_v_cyfunc_ptr, __pyx_v_blockSize, __pyx_v_dynamicSMemSize); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19358, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":19357 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19359 * with nogil: * err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], numBlocks) */ __pyx_t_7 = (__pyx_v_err != cudaSuccess); if (__pyx_t_7) { /* "cuda/bindings/runtime.pyx":19360 * err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], numBlocks) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19360, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 19360, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 19360, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19359 * with nogil: * err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], numBlocks) */ } /* "cuda/bindings/runtime.pyx":19361 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], numBlocks) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_v_numBlocks); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19361, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 19361, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2) != (0)) __PYX_ERR(0, 19361, __pyx_L1_error); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19327 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaOccupancyMaxActiveBlocksPerMultiprocessor(func, int blockSize, size_t dynamicSMemSize): * """ Returns occupancy for a device function. 
*/
/* NOTE(review): Cython-generated — do not hand-edit; regenerate from
 * cuda/bindings/runtime.pyx. Shared exit code: error path adds a traceback
 * and returns NULL; both paths drop the helper reference. */
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessor", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cyfunc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19363 * return (_dict_cudaError_t[err], numBlocks) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaOccupancyAvailableDynamicSMemPerBlock(func, int numBlocks, int blockSize): * """ Returns dynamic shared memory available per block when launching `numBlocks` blocks on SM. */
/* Python wrapper */
/* CPython entry point for cudaOccupancyAvailableDynamicSMemPerBlock:
 * unpacks (func, numBlocks, blockSize) and delegates to
 * __pyx_pf_..._174... below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_175cudaOccupancyAvailableDynamicSMemPerBlock(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_174cudaOccupancyAvailableDynamicSMemPerBlock, "cudaOccupancyAvailableDynamicSMemPerBlock(func, int numBlocks, int blockSize)\n\nReturns dynamic shared memory available per block when launching `numBlocks` blocks on SM.\n\nReturns in `*dynamicSmemSize` the maximum size of dynamic shared memory\nto allow `numBlocks` blocks per SM.\n\nParameters\n----------\nfunc : Any\n Kernel function for which occupancy is calculated\nnumBlocks : int\n Number of blocks to fit on SM\nblockSize : int\n Size of the block\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidDeviceFunction`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorUnknown`,\ndynamicSmemSize : int\n Returned maximum dynamic shared memory\n\nSee Also\n--------\n:py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags`, cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API), :py:obj:`~.cudaOccupancyAvailableDynamicSMemPerBlock`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_175cudaOccupancyAvailableDynamicSMemPerBlock = {"cudaOccupancyAvailableDynamicSMemPerBlock", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_175cudaOccupancyAvailableDynamicSMemPerBlock, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_174cudaOccupancyAvailableDynamicSMemPerBlock};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_175cudaOccupancyAvailableDynamicSMemPerBlock(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_func = 0; int __pyx_v_numBlocks; int __pyx_v_blockSize;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaOccupancyAvailableDynamicSMemPerBlock (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* NOTE(review): argument unpacking — fills values[0..2], converts numBlocks
 * and blockSize to C int; failures jump to __pyx_L3_error. */
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_func_2,&__pyx_mstate_global->__pyx_n_u_numBlocks,&__pyx_mstate_global->__pyx_n_u_blockSize,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19363, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19363, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19363, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19363, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaOccupancyAvailableDynamicSMemPerBlock", 0) < (0)) __PYX_ERR(0, 19363, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaOccupancyAvailableDynamicSMemPerBlock", 1, 3, 3, i); __PYX_ERR(0, 19363, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19363, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19363, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19363, __pyx_L3_error) } __pyx_v_func = values[0]; __pyx_v_numBlocks = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_numBlocks == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19364, __pyx_L3_error) __pyx_v_blockSize = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_blockSize == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19364, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaOccupancyAvailableDynamicSMemPerBlock", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 19363, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaOccupancyAvailableDynamicSMemPerBlock", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_174cudaOccupancyAvailableDynamicSMemPerBlock(__pyx_self, __pyx_v_func, __pyx_v_numBlocks, __pyx_v_blockSize); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): implementation function — wraps `func` in _HelperInputVoidPtr
 * and queries available dynamic shared memory under nogil (body continues
 * on the following lines). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_174cudaOccupancyAvailableDynamicSMemPerBlock(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func, int __pyx_v_numBlocks, int __pyx_v_blockSize) { size_t __pyx_v_dynamicSmemSize; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyfunc = NULL; void *__pyx_v_cyfunc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaOccupancyAvailableDynamicSMemPerBlock", 0); /* "cuda/bindings/runtime.pyx":19390 * :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags`, cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API), 
cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API), :py:obj:`~.cudaOccupancyAvailableDynamicSMemPerBlock` * """ * cdef size_t dynamicSmemSize = 0 # <<<<<<<<<<<<<< * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr */ __pyx_v_dynamicSmemSize = 0; /* "cuda/bindings/runtime.pyx":19391 * """ * cdef size_t dynamicSmemSize = 0 * cyfunc = _HelperInputVoidPtr(func) # <<<<<<<<<<<<<< * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_func}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19391, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyfunc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":19392 * cdef size_t dynamicSmemSize = 0 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cyfunc_ptr, numBlocks, blockSize) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyfunc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19392, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyfunc_ptr = ((void 
*)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":19393 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cyfunc_ptr, numBlocks, blockSize) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19394 * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: * err = cyruntime.cudaOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cyfunc_ptr, numBlocks, blockSize) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaOccupancyAvailableDynamicSMemPerBlock((&__pyx_v_dynamicSmemSize), __pyx_v_cyfunc_ptr, __pyx_v_numBlocks, __pyx_v_blockSize); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19394, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":19393 * cyfunc = _HelperInputVoidPtr(func) * cdef void* cyfunc_ptr = cyfunc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cyfunc_ptr, numBlocks, blockSize) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19395 * with nogil: * err = cyruntime.cudaOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cyfunc_ptr, numBlocks, blockSize) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], dynamicSmemSize) */ __pyx_t_7 = (__pyx_v_err != cudaSuccess); if (__pyx_t_7) { /* "cuda/bindings/runtime.pyx":19396 * err = 
cyruntime.cudaOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cyfunc_ptr, numBlocks, blockSize) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], dynamicSmemSize) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19396, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19396, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19396, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19396, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 19396, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 19396, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19395 * with nogil: * err = cyruntime.cudaOccupancyAvailableDynamicSMemPerBlock(&dynamicSmemSize, cyfunc_ptr, numBlocks, blockSize) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], dynamicSmemSize) */ } /* "cuda/bindings/runtime.pyx":19397 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], dynamicSmemSize) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyLong_FromSize_t(__pyx_v_dynamicSmemSize); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 19397, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2) != (0)) __PYX_ERR(0, 19397, __pyx_L1_error); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19363 * return (_dict_cudaError_t[err], numBlocks) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaOccupancyAvailableDynamicSMemPerBlock(func, int numBlocks, int blockSize): * """ Returns dynamic shared memory available per block when launching `numBlocks` blocks on SM. 
*/
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaOccupancyAvailableDynamicSMemPerBlock", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_cyfunc);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":19399
 * return (_dict_cudaError_t[err], dynamicSmemSize)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(func, int blockSize, size_t dynamicSMemSize, unsigned int flags):
 *     """ Returns occupancy for a device function with the specified flags.
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_177cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_176cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(func, int blockSize, size_t dynamicSMemSize, unsigned int flags)\n\nReturns occupancy for a device function with the specified flags.\n\nReturns in `*numBlocks` the maximum number of active blocks per\nstreaming multiprocessor for the device function.\n\nThe `flags` parameter controls how special cases are handled. Valid\nflags include:\n\n- :py:obj:`~.cudaOccupancyDefault`: keeps the default behavior as\n  :py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessor`\n\n- :py:obj:`~.cudaOccupancyDisableCachingOverride`: This flag suppresses\n  the default behavior on platform where global caching affects\n  occupancy. On such platforms, if caching is enabled, but per-block SM\n  resource usage would result in zero occupancy, the occupancy\n  calculator will calculate the occupancy as if caching is disabled.\n  Setting this flag makes the occupancy calculator to return 0 in such\n  cases. More information can be found about this feature in the\n  \"Unified L1/Texture Cache\" section of the Maxwell tuning guide.\n\nParameters\n----------\nfunc : Any\n    Kernel function for which occupancy is calculated\nblockSize : int\n    Block size the kernel is intended to be launched with\ndynamicSMemSize : size_t\n    Per-block dynamic shared memory usage intended, in bytes\nflags : unsigned int\n    Requested behavior for the occupancy calculator\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidDeviceFunction`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorUnknown`,\nnumBlocks : int\n    Returned occupancy\n\nSee Also\n--------\n:py:obj:`~.cudaOccupancyMaxActiveBlocksPerMultiprocessor`, cudaOccupancyMaxPotentialBlockSize (C++ API), cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API), cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API), cudaOccupancyAvailableDynamicSMem""PerBlock (C++ API), :py:obj:`~.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_177cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags = {"cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_177cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_176cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags};
/* NOTE(review): Cython-generated fastcall wrapper.  Unpacks the Python args
 * (func, blockSize, dynamicSMemSize, flags) from either positional or keyword
 * form, converts the three numeric args with __Pyx_PyLong_As_*, and forwards
 * to the __pyx_pf_ implementation below.  This file is generated output —
 * change cuda/bindings/runtime.pyx and regenerate instead of hand-editing. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_177cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_func = 0;
  int __pyx_v_blockSize;
  size_t __pyx_v_dynamicSMemSize;
  unsigned int __pyx_v_flags;
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[4] = {0,0,0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_func_2,&__pyx_mstate_global->__pyx_n_u_blockSize,&__pyx_mstate_global->__pyx_n_u_dynamicSMemSize,&__pyx_mstate_global->__pyx_n_u_flags_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19399, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 19399, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19399, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19399, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19399, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", 0) < (0)) __PYX_ERR(0, 19399, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 4; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", 1, 4, 4, i); __PYX_ERR(0, 19399, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 4)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19399, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19399, __pyx_L3_error)
      values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19399, __pyx_L3_error)
      values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 19399, __pyx_L3_error)
    }
    __pyx_v_func = values[0];
    __pyx_v_blockSize = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_blockSize == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19400, __pyx_L3_error)
    __pyx_v_dynamicSMemSize = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_dynamicSMemSize == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19400, __pyx_L3_error)
    __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[3]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19400, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 19399, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* On unpack failure: drop any argument refs already taken, record traceback. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_176cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(__pyx_self, __pyx_v_func, __pyx_v_blockSize, __pyx_v_dynamicSMemSize, __pyx_v_flags);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Implementation body.  Wraps `func` in _HelperInputVoidPtr,
 * extracts its `cptr` attribute as an integer pointer value, calls the
 * cyruntime occupancy function with the GIL released, and returns a 2-tuple
 * (_dict_cudaError_t[err], numBlocks) — the second element is None when the
 * call did not return cudaSuccess. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_176cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_func, int __pyx_v_blockSize, size_t __pyx_v_dynamicSMemSize, unsigned int __pyx_v_flags) {
  int __pyx_v_numBlocks;
  struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyfunc = NULL;
  void *__pyx_v_cyfunc_ptr;
  cudaError_t __pyx_v_err;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  size_t __pyx_t_4;
  __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5;
  cudaError_t __pyx_t_6;
  int __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", 0);

  /* "cuda/bindings/runtime.pyx":19443   cdef int numBlocks = 0 */
  __pyx_v_numBlocks = 0;

  /* "cuda/bindings/runtime.pyx":19444   cyfunc = _HelperInputVoidPtr(func) */
  __pyx_t_2 = NULL;
  __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr);
  __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr);
  __pyx_t_4 = 1;
  {
    PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_func};
    __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
    __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19444, __pyx_L1_error)
    __Pyx_GOTREF((PyObject *)__pyx_t_1);
  }
  __pyx_v_cyfunc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1);
  __pyx_t_1 = 0;

  /* "cuda/bindings/runtime.pyx":19445   cdef void* cyfunc_ptr = cyfunc.cptr */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyfunc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19445, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19445, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_cyfunc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5));

  /* "cuda/bindings/runtime.pyx":19446   with nogil: */
  {
    PyThreadState *_save;
    _save = NULL;
    Py_UNBLOCK_THREADS
    __Pyx_FastGIL_Remember();
    /*try:*/ {

      /* "cuda/bindings/runtime.pyx":19447   err = cyruntime.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(&numBlocks, cyfunc_ptr, blockSize, dynamicSMemSize, flags) */
      __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags((&__pyx_v_numBlocks), __pyx_v_cyfunc_ptr, __pyx_v_blockSize, __pyx_v_dynamicSMemSize, __pyx_v_flags); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19447, __pyx_L4_error)
      __pyx_v_err = __pyx_t_6;
    }

    /* "cuda/bindings/runtime.pyx":19446   with nogil:  (reacquire GIL on both paths) */
    /*finally:*/ {
      /*normal exit:*/{
        __Pyx_FastGIL_Forget();
        Py_BLOCK_THREADS
        goto __pyx_L5;
      }
      __pyx_L4_error: {
        __Pyx_FastGIL_Forget();
        Py_BLOCK_THREADS
        goto __pyx_L1_error;
      }
      __pyx_L5:;
    }
  }

  /* "cuda/bindings/runtime.pyx":19448   if err != cyruntime.cudaSuccess: */
  __pyx_t_7 = (__pyx_v_err != cudaSuccess);
  if (__pyx_t_7) {

    /* "cuda/bindings/runtime.pyx":19449   return (_dict_cudaError_t[err], None) */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19449, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19449, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19449, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19449, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_2);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 19449, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 19449, __pyx_L1_error);
    __pyx_t_2 = 0;
    __pyx_r = __pyx_t_3;
    __pyx_t_3 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/runtime.pyx":19448   if err != cyruntime.cudaSuccess: */
  }

  /* "cuda/bindings/runtime.pyx":19450   return (_dict_cudaError_t[err], numBlocks) */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyLong_From_int(__pyx_v_numBlocks); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19450, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_1);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 19450, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_2);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2) != (0)) __PYX_ERR(0, 19450, __pyx_L1_error);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;

  /* "cuda/bindings/runtime.pyx":19399
   * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
   * def cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(func, int blockSize, size_t dynamicSMemSize, unsigned int flags):
   *     """ Returns occupancy for a device function with the specified flags.
   */

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_cyfunc);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":19452
 * return (_dict_cudaError_t[err], numBlocks)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaMallocManaged(size_t size, unsigned int flags):
 *     """ Allocates memory that will be automatically managed by the Unified Memory system.
 */

/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_179cudaMallocManaged(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_178cudaMallocManaged, "cudaMallocManaged(size_t size, unsigned int flags)\n\nAllocates memory that will be automatically managed by the Unified Memory system.\n\nAllocates `size` bytes of managed memory on the device and returns in\n`*devPtr` a pointer to the allocated memory. If the device doesn't\nsupport allocating managed memory, :py:obj:`~.cudaErrorNotSupported` is\nreturned. Support for managed memory can be queried using the device\nattribute :py:obj:`~.cudaDevAttrManagedMemory`. The allocated memory is\nsuitably aligned for any kind of variable. The memory is not cleared.\nIf `size` is 0, :py:obj:`~.cudaMallocManaged` returns\n:py:obj:`~.cudaErrorInvalidValue`. The pointer is valid on the CPU and\non all GPUs in the system that support managed memory. All accesses to\nthis pointer must obey the Unified Memory programming model.\n\n`flags` specifies the default stream association for this allocation.\n`flags` must be one of :py:obj:`~.cudaMemAttachGlobal` or\n:py:obj:`~.cudaMemAttachHost`. The default value for `flags` is\n:py:obj:`~.cudaMemAttachGlobal`. If :py:obj:`~.cudaMemAttachGlobal` is\nspecified, then this memory is accessible from any stream on any\ndevice. If :py:obj:`~.cudaMemAttachHost` is specified, then the\nallocation should not be accessed from devices that have a zero value\nfor the device attribute\n:py:obj:`~.cudaDevAttrConcurrentManagedAccess`; an explicit call to\n:py:obj:`~.cudaStreamAttachMemAsync` will be required to enable access\non such devices.\n\nIf the association is later changed via\n:py:obj:`~.cudaStreamAttachMemAsync` to a single stream, the default\nassociation, as specifed during :py:obj:`~.cudaMallocManaged`, is\nrestored when that stream is destroyed. For managed variables, the\ndefault association is always :py:obj:`~.cudaMemAttachGlobal`. Note\nthat destroying a stream is an asynchronous operation, and as a result,\nthe change to default association won't happen until all work in the\nstream has completed.\n\nMemory allocated with :py:obj:`~.""cudaMallocManaged` should be released\nwith :py:obj:`~.cudaFree`.\n\nDevice memory oversubscription is possible for GPUs that have a non-\nzero value for the device attribute\n:py:obj:`~.cudaDevAttrConcurrentManagedAccess`. Managed memory on such\nGPUs may be evicted from device memory to host memory at any time by\nthe Unified Memory driver in order to make room for other allocations.\n\nIn a system where all GPUs have a non-zero value for the device\nattribute :py:obj:`~.cudaDevAttrConcurrentManagedAccess`, managed\nmemory may not be populated when this API returns and instead may be\npopulated on access. In such systems, managed memory can migrate to any\nprocessor's memory at any time. The Unified Memory driver will employ\nheuristics to maintain data locality and prevent excessive page faults\nto the extent possible. The application can also guide the driver about\nmemory usage patterns via :py:obj:`~.cudaMemAdvise`. The application\ncan also explicitly migrate memory to a desired processor's memory via\n:py:obj:`~.cudaMemPrefetchAsync`.\n\nIn a multi-GPU system where all of the GPUs have a zero value for the\ndevice attribute :py:obj:`~.cudaDevAttrConcurrentManagedAccess` and all\nthe GPUs have peer-to-peer support with each other, the physical\nstorage for managed memory is created on the GPU which is active at the\ntime :py:obj:`~.cudaMallocManaged` is called. All other GPUs will\nreference the data at reduced bandwidth via peer mappings over the PCIe\nbus. The Unified Memory driver does not migrate memory among such GPUs.\n\nIn a multi-GPU system where not all GPUs have peer-to-peer support with\neach other and where the value of the device attribute\n:py:obj:`~.cudaDevAttrConcurrentManagedAccess` is zero for at least one\nof those GPUs, the location chosen for physical storage of managed\nmemory is system-dependent.\n\n- On Linux, the location chosen will be device memory as long as the\n  current set of active contexts are on devices that either have peer""-\n  to-peer support with each other or have a non-zero value for the\n  device attribute :py:obj:`~.cudaDevAttrConcurrentManagedAccess`. If\n  there is an active context on a GPU that does not have a non-zero\n  value for that device attribute and it does not have peer-to-peer\n  support with the other devices that have active contexts on them,\n  then the location for physical storage will be 'zero-copy' or host\n  memory. Note that this means that managed memory that is located in\n  device memory is migrated to host memory if a new context is created\n  on a GPU that doesn't have a non-zero value for the device attribute\n  and does not support peer-to-peer with at least one of the other\n  devices that has an active context. This in turn implies that context\n  creation may fail if there is insufficient host memory to migrate all\n  managed allocations.\n\n- On Windows, the physical storage is always created in 'zero-copy' or\n  host memory. All GPUs will reference the data at reduced bandwidth\n  over the PCIe bus. In these circumstances, use of the environment\n  variable CUDA_VISIBLE_DEVICES is recommended to restrict CUDA to only\n  use those GPUs that have peer-to-peer support. Alternatively, users\n  can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero value to\n  force the driver to always use device memory for physical storage.\n  When this environment variable is set to a non-zero value, all\n  devices used in that process that support managed memory have to be\n  peer-to-peer compatible with each other. The error\n  :py:obj:`~.cudaErrorInvalidDevice` will be returned if a device that\n  supports managed memory is used and it is not peer-to-peer compatible\n  with any of the other managed memory supporting devices that were\n  previously used in that process, even if :py:obj:`~.cudaDeviceReset`\n  has been called on those devices. These environment variables are\n  described in the CUDA programming guide under the \"CUDA environment\n  variables""\" section.\n\nParameters\n----------\nsize : size_t\n    Requested allocation size in bytes\nflags : unsigned int\n    Must be either :py:obj:`~.cudaMemAttachGlobal` or\n    :py:obj:`~.cudaMemAttachHost` (defaults to\n    :py:obj:`~.cudaMemAttachGlobal`)\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorMemoryAllocation`, :py:obj:`~.cudaErrorNotSupported`, :py:obj:`~.cudaErrorInvalidValue`\ndevPtr : Any\n    Pointer to allocated device memory\n\nSee Also\n--------\n:py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cudaDeviceGetAttribute`, :py:obj:`~.cudaStreamAttachMemAsync`, :py:obj:`~.cuMemAllocManaged`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_179cudaMallocManaged = {"cudaMallocManaged", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_179cudaMallocManaged, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_178cudaMallocManaged};
/* NOTE(review): Cython-generated fastcall wrapper for cudaMallocManaged.
 * Unpacks (size, flags) from positional/keyword form, converts them with
 * __Pyx_PyLong_As_size_t / __Pyx_PyLong_As_unsigned_int, and forwards to the
 * __pyx_pf_ implementation.  Generated code — edit the .pyx, not this file. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_179cudaMallocManaged(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  size_t __pyx_v_size;
  unsigned int __pyx_v_flags;
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[2] = {0,0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("cudaMallocManaged (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19452, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      switch (__pyx_nargs) {
        case  2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19452, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19452, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMallocManaged", 0) < (0)) __PYX_ERR(0, 19452, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMallocManaged", 1, 2, 2, i); __PYX_ERR(0, 19452, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 2)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19452, __pyx_L3_error)
      values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19452, __pyx_L3_error)
    }
    __pyx_v_size = __Pyx_PyLong_As_size_t(values[0]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19453, __pyx_L3_error)
    __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19453, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("cudaMallocManaged", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 19452, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* On unpack failure: drop any argument refs already taken, record traceback. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocManaged", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_178cudaMallocManaged(__pyx_self, __pyx_v_size, __pyx_v_flags);

  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* NOTE(review): Implementation body for cudaMallocManaged.  Calls the
 * cyruntime allocator with the GIL released and returns a 2-tuple
 * (_dict_cudaError_t[err], devPtr); devPtr is exposed to Python as an
 * integer (void_ptr, via __Pyx_PyLong_From_unsigned_PY_LONG_LONG) and is
 * None when err != cudaSuccess. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_178cudaMallocManaged(CYTHON_UNUSED PyObject *__pyx_self, size_t __pyx_v_size, unsigned int __pyx_v_flags) {
  __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_devPtr;
  cudaError_t __pyx_v_err;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  cudaError_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("cudaMallocManaged", 0);

  /* "cuda/bindings/runtime.pyx":19576   cdef void_ptr devPtr = 0 */
  __pyx_v_devPtr = 0;

  /* "cuda/bindings/runtime.pyx":19577   with nogil: */
  {
    PyThreadState *_save;
    _save = NULL;
    Py_UNBLOCK_THREADS
    __Pyx_FastGIL_Remember();
    /*try:*/ {

      /* "cuda/bindings/runtime.pyx":19578   err = cyruntime.cudaMallocManaged(&devPtr, size, flags) */
      __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMallocManaged(((void **)(&__pyx_v_devPtr)), __pyx_v_size, __pyx_v_flags); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19578, __pyx_L4_error)
      __pyx_v_err = __pyx_t_1;
    }

    /* "cuda/bindings/runtime.pyx":19577   with nogil:  (reacquire GIL on both paths) */
    /*finally:*/ {
      /*normal exit:*/{
        __Pyx_FastGIL_Forget();
        Py_BLOCK_THREADS
        goto __pyx_L5;
      }
      __pyx_L4_error: {
        __Pyx_FastGIL_Forget();
        Py_BLOCK_THREADS
        goto __pyx_L1_error;
      }
      __pyx_L5:;
    }
  }

  /* "cuda/bindings/runtime.pyx":19579   if err != cyruntime.cudaSuccess: */
  __pyx_t_2 = (__pyx_v_err != cudaSuccess);
  if (__pyx_t_2) {

    /* "cuda/bindings/runtime.pyx":19580   return (_dict_cudaError_t[err], None) */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19580, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19580, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19580, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19580, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_5);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 19580, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 19580, __pyx_L1_error);
    __pyx_t_5 = 0;
    __pyx_r = __pyx_t_4;
    __pyx_t_4 = 0;
    goto __pyx_L0;

    /* "cuda/bindings/runtime.pyx":19579   if err != cyruntime.cudaSuccess: */
  }

  /* "cuda/bindings/runtime.pyx":19581   return (_dict_cudaError_t[err], devPtr) */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19581, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19581, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19581, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_devPtr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19581, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4))
/* NOTE(review): Machine-generated by Cython from cuda/bindings/runtime.pyx (see the embedded pyx-source comments). Fix defects in the .pyx and regenerate; do not hand-edit this C output. */
__PYX_ERR(0, 19581, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 19581, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 19581, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19452 * return (_dict_cudaError_t[err], numBlocks) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocManaged(size_t size, unsigned int flags): * """ Allocates memory that will be automatically managed by the Unified Memory system. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocManaged", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19583 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMalloc(size_t size): * """ Allocate memory on the device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_181cudaMalloc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_180cudaMalloc, "cudaMalloc(size_t size)\n\nAllocate memory on the device.\n\nAllocates `size` bytes of linear memory on the device and returns in\n`*devPtr` a pointer to the allocated memory. The allocated memory is\nsuitably aligned for any kind of variable. 
The memory is not cleared.\n:py:obj:`~.cudaMalloc()` returns :py:obj:`~.cudaErrorMemoryAllocation`\nin case of failure.\n\nThe device version of :py:obj:`~.cudaFree` cannot be used with a\n`*devPtr` allocated using the host API, and vice versa.\n\nParameters\n----------\nsize : size_t\n Requested allocation size in bytes\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\ndevPtr : Any\n Pointer to allocated device memory\n\nSee Also\n--------\n:py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemAlloc`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_181cudaMalloc = {"cudaMalloc", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_181cudaMalloc, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_180cudaMalloc}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_181cudaMalloc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { size_t __pyx_v_size; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMalloc (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const 
/* Wrapper argument parsing for cudaMalloc: accepts exactly one argument, 'size', positionally or by keyword; the value is then converted with __Pyx_PyLong_As_size_t before dispatching to the impl function. */
__pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19583, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19583, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMalloc", 0) < (0)) __PYX_ERR(0, 19583, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMalloc", 1, 1, 1, i); __PYX_ERR(0, 19583, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19583, __pyx_L3_error) } __pyx_v_size = __Pyx_PyLong_As_size_t(values[0]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19584, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMalloc", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19583, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMalloc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_180cudaMalloc(__pyx_self, __pyx_v_size); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
/* success-path cleanup: release any argument references still held in values[] before returning the impl's result */
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_180cudaMalloc(CYTHON_UNUSED PyObject *__pyx_self, size_t __pyx_v_size) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_devPtr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMalloc", 0); /* "cuda/bindings/runtime.pyx":19612 * :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemAlloc` * """ * cdef void_ptr devPtr = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMalloc(&devPtr, size) */ __pyx_v_devPtr = 0; /* "cuda/bindings/runtime.pyx":19613 * """ * cdef void_ptr devPtr = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMalloc(&devPtr, size) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19614 * cdef void_ptr devPtr = 0 * with nogil: * err = cyruntime.cudaMalloc(&devPtr, size) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMalloc(((void **)(&__pyx_v_devPtr)), __pyx_v_size); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19614, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":19613 * """ * cdef void_ptr devPtr = 0 * with nogil: # <<<<<<<<<<<<<< * err = 
cyruntime.cudaMalloc(&devPtr, size) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19615 * with nogil: * err = cyruntime.cudaMalloc(&devPtr, size) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":19616 * err = cyruntime.cudaMalloc(&devPtr, size) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], devPtr) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 19616, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 19616, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19615 * with nogil: * err = cyruntime.cudaMalloc(&devPtr, size) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return 
(_dict_cudaError_t[err], devPtr) */ } /* "cuda/bindings/runtime.pyx":19617 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_devPtr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 19617, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 19617, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19583 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMalloc(size_t size): * """ Allocate memory on the device. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMalloc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19619 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocHost(size_t size): * """ Allocates page-locked memory on the host. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_183cudaMallocHost(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_182cudaMallocHost, "cudaMallocHost(size_t size)\n\nAllocates page-locked memory on the host.\n\nAllocates `size` bytes of host memory that is page-locked and\naccessible to the device. The driver tracks the virtual memory ranges\nallocated with this function and automatically accelerates calls to\nfunctions such as :py:obj:`~.cudaMemcpy`*(). Since the memory can be\naccessed directly by the device, it can be read or written with much\nhigher bandwidth than pageable memory obtained with functions such as\n:py:obj:`~.malloc()`.\n\nOn systems where :py:obj:`~.pageableMemoryAccessUsesHostPageTables` is\ntrue, :py:obj:`~.cudaMallocHost` may not page-lock the allocated\nmemory.\n\nPage-locking excessive amounts of memory with\n:py:obj:`~.cudaMallocHost()` may degrade system performance, since it\nreduces the amount of memory available to the system for paging. 
As a\nresult, this function is best used sparingly to allocate staging areas\nfor data exchange between host and device.\n\nParameters\n----------\nsize : size_t\n Requested allocation size in bytes\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\nptr : Any\n Pointer to allocated host memory\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, cudaMallocHost (C++ API), :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemAllocHost`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_183cudaMallocHost = {"cudaMallocHost", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_183cudaMallocHost, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_182cudaMallocHost}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_183cudaMallocHost(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { size_t __pyx_v_size; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMallocHost (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size_2,0}; const 
/* NOTE(review): Cython-generated wrapper for cudaMallocHost(size): parses one 'size' argument (positional or keyword), converts to size_t, then dispatches to the impl function below. Hand-edits are lost on regeneration from runtime.pyx. */
Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19619, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19619, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMallocHost", 0) < (0)) __PYX_ERR(0, 19619, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMallocHost", 1, 1, 1, i); __PYX_ERR(0, 19619, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19619, __pyx_L3_error) } __pyx_v_size = __Pyx_PyLong_As_size_t(values[0]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19620, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMallocHost", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19619, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocHost", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_182cudaMallocHost(__pyx_self, __pyx_v_size); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); 
/* release argument references held in values[] after the impl call returns */
} __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_182cudaMallocHost(CYTHON_UNUSED PyObject *__pyx_self, size_t __pyx_v_size) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMallocHost", 0); /* "cuda/bindings/runtime.pyx":19657 * :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, cudaMallocHost (C++ API), :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemAllocHost` * """ * cdef void_ptr ptr = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMallocHost(&ptr, size) */ __pyx_v_ptr = 0; /* "cuda/bindings/runtime.pyx":19658 * """ * cdef void_ptr ptr = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocHost(&ptr, size) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19659 * cdef void_ptr ptr = 0 * with nogil: * err = cyruntime.cudaMallocHost(&ptr, size) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMallocHost(((void **)(&__pyx_v_ptr)), __pyx_v_size); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19659, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":19658 * """ * cdef void_ptr ptr = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocHost(&ptr, size) * if err != cyruntime.cudaSuccess: */ 
/* finally: the GIL is reacquired (Py_BLOCK_THREADS) on both the normal and the error exit of the nogil region */
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19660 * with nogil: * err = cyruntime.cudaMallocHost(&ptr, size) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ptr) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":19661 * err = cyruntime.cudaMallocHost(&ptr, size) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], ptr) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19661, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 19661, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 19661, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19660 * with nogil: * err = cyruntime.cudaMallocHost(&ptr, size) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ptr) */ } /* "cuda/bindings/runtime.pyx":19662 * if err != 
cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ptr) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19662, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19662, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19662, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_ptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19662, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19662, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 19662, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 19662, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19619 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocHost(size_t size): * """ Allocates page-locked memory on the host. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocHost", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19664 * return (_dict_cudaError_t[err], ptr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocPitch(size_t width, size_t height): * """ Allocates pitched memory on the device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_185cudaMallocPitch(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_184cudaMallocPitch, "cudaMallocPitch(size_t width, size_t height)\n\nAllocates pitched memory on the device.\n\nAllocates at least `width` (in bytes) * `height` bytes of linear memory\non the device and returns in `*devPtr` a pointer to the allocated\nmemory. The function may pad the allocation to ensure that\ncorresponding pointers in any given row will continue to meet the\nalignment requirements for coalescing as the address is updated from\nrow to row. The pitch returned in `*pitch` by\n:py:obj:`~.cudaMallocPitch()` is the width in bytes of the allocation.\nThe intended usage of `pitch` is as a separate parameter of the\nallocation, used to compute addresses within the 2D array. Given the\nrow and column of an array element of type `T`, the address is computed\nas:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nFor allocations of 2D arrays, it is recommended that programmers\nconsider performing pitch allocations using\n:py:obj:`~.cudaMallocPitch()`. 
Due to pitch alignment restrictions in\nthe hardware, this is especially true if the application will be\nperforming 2D memory copies between different regions of device memory\n(whether linear memory or CUDA arrays).\n\nParameters\n----------\nwidth : size_t\n Requested pitched allocation width (in bytes)\nheight : size_t\n Requested pitched allocation height\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\ndevPtr : Any\n Pointer to allocated pitched device memory\npitch : int\n Pitch for allocation\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemAllocPitch`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_185cudaMallocPitch = {"cudaMallocPitch", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_185cudaMallocPitch, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_184cudaMallocPitch}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_185cudaMallocPitch(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { size_t __pyx_v_width; size_t __pyx_v_height; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMallocPitch (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if 
/* NOTE(review): Cython-generated wrapper for cudaMallocPitch(width, height): parses two size_t args then dispatches to the impl; a negative __pyx_nargs here means PyTuple_Size failed */
(unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19664, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19664, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19664, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMallocPitch", 0) < (0)) __PYX_ERR(0, 19664, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMallocPitch", 1, 2, 2, i); __PYX_ERR(0, 19664, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19664, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19664, __pyx_L3_error) } __pyx_v_width = __Pyx_PyLong_As_size_t(values[0]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19665, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19665, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; 
/* wrong positional arity: raise TypeError via RaiseArgtupleInvalid (exactly 2 arguments expected) */
__Pyx_RaiseArgtupleInvalid("cudaMallocPitch", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 19664, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocPitch", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_184cudaMallocPitch(__pyx_self, __pyx_v_width, __pyx_v_height); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_184cudaMallocPitch(CYTHON_UNUSED PyObject *__pyx_self, size_t __pyx_v_width, size_t __pyx_v_height) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_devPtr; size_t __pyx_v_pitch; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMallocPitch", 0); /* "cuda/bindings/runtime.pyx":19709 * :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemAllocPitch` * """ * cdef void_ptr devPtr = 0 # <<<<<<<<<<<<<< * cdef size_t pitch = 0 * with nogil: */ __pyx_v_devPtr = 0; /* "cuda/bindings/runtime.pyx":19710 * """ * cdef void_ptr devPtr = 0 * cdef size_t pitch = 0 # <<<<<<<<<<<<<< * with nogil: * err = 
cyruntime.cudaMallocPitch(&devPtr, &pitch, width, height) */ __pyx_v_pitch = 0; /* "cuda/bindings/runtime.pyx":19711 * cdef void_ptr devPtr = 0 * cdef size_t pitch = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocPitch(&devPtr, &pitch, width, height) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19712 * cdef size_t pitch = 0 * with nogil: * err = cyruntime.cudaMallocPitch(&devPtr, &pitch, width, height) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMallocPitch(((void **)(&__pyx_v_devPtr)), (&__pyx_v_pitch), __pyx_v_width, __pyx_v_height); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19712, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":19711 * cdef void_ptr devPtr = 0 * cdef size_t pitch = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocPitch(&devPtr, &pitch, width, height) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19713 * with nogil: * err = cyruntime.cudaMallocPitch(&devPtr, &pitch, width, height) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None) * return (_dict_cudaError_t[err], devPtr, pitch) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":19714 * err = cyruntime.cudaMallocPitch(&devPtr, &pitch, width, height) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], devPtr, pitch) * */ __Pyx_XDECREF(__pyx_r); 
/* error branch: build and return the tuple (_dict_cudaError_t[err], None, None) */
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19714, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19714, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19714, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19714, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 19714, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 19714, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, Py_None) != (0)) __PYX_ERR(0, 19714, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19713 * with nogil: * err = cyruntime.cudaMallocPitch(&devPtr, &pitch, width, height) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None) * return (_dict_cudaError_t[err], devPtr, pitch) */ } /* "cuda/bindings/runtime.pyx":19715 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None) * return (_dict_cudaError_t[err], devPtr, pitch) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = 
__Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_devPtr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_FromSize_t(__pyx_v_pitch); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 19715, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 19715, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 19715, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_4) != (0)) __PYX_ERR(0, 19715, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19664 * return (_dict_cudaError_t[err], ptr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocPitch(size_t width, size_t height): * """ Allocates pitched memory on the device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocPitch", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19717 * return (_dict_cudaError_t[err], devPtr, pitch) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocArray(desc : Optional[cudaChannelFormatDesc], size_t width, size_t height, unsigned int flags): * """ Allocate an array on the device. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_187cudaMallocArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_186cudaMallocArray, "cudaMallocArray(cudaChannelFormatDesc desc: Optional[cudaChannelFormatDesc], size_t width, size_t height, unsigned int flags)\n\nAllocate an array on the device.\n\nAllocates a CUDA array according to the\n:py:obj:`~.cudaChannelFormatDesc` structure `desc` and returns a handle\nto the new CUDA array in `*array`.\n\nThe :py:obj:`~.cudaChannelFormatDesc` is defined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere :py:obj:`~.cudaChannelFormatKind` is one of\n:py:obj:`~.cudaChannelFormatKindSigned`,\n:py:obj:`~.cudaChannelFormatKindUnsigned`, or\n:py:obj:`~.cudaChannelFormatKindFloat`.\n\nThe `flags` parameter enables different options to be specified that\naffect the allocation, as follows.\n\n- :py:obj:`~.cudaArrayDefault`: This flag's value is defined to be 0\n and provides default array allocation\n\n- :py:obj:`~.cudaArraySurfaceLoadStore`: Allocates an array that can be\n read from or written to using a surface reference\n\n- :py:obj:`~.cudaArrayTextureGather`: This flag indicates that texture\n gather operations will be performed on the array.\n\n- :py:obj:`~.cudaArraySparse`: Allocates a CUDA array without physical\n backing memory. The subregions within this sparse array can later be\n mapped onto a physical memory allocation by calling\n :py:obj:`~.cuMemMapArrayAsync`. The physical backing memory must be\n allocated via :py:obj:`~.cuMemCreate`.\n\n- :py:obj:`~.cudaArrayDeferredMapping`: Allocates a CUDA array without\n physical backing memory. 
The entire array can later be mapped onto a\n physical memory allocation by calling :py:obj:`~.cuMemMapArrayAsync`.\n The physical backing memory must be allocated via\n :py:obj:`~.cuMemCreate`.\n\n`width` and `height` must meet certain size requirements. See\n:py:obj:`~.cudaMalloc3DArray()` for more details.\n\nParameters\n----------\ndesc : :py:obj:`~.cudaChannelFormatDesc`\n Requested channel format\nwidth : size_t\n Requested array allocation width\nheight : size_t\n Reque""sted array allocation height\nflags : unsigned int\n Requested properties of allocated array\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\narray : :py:obj:`~.cudaArray_t`\n Pointer to allocated array in device memory\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuArrayCreate`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_187cudaMallocArray = {"cudaMallocArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_187cudaMallocArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_186cudaMallocArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_187cudaMallocArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_desc = 0; size_t __pyx_v_width; size_t __pyx_v_height; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMallocArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_desc_2,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19717, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 19717, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19717, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19717, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19717, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMallocArray", 0) < (0)) __PYX_ERR(0, 19717, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMallocArray", 1, 4, 4, i); __PYX_ERR(0, 19717, __pyx_L3_error) } } } else if 
(unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19717, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19717, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 19717, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 19717, __pyx_L3_error) } __pyx_v_desc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *)values[0]); __pyx_v_width = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19718, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19718, __pyx_L3_error) __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[3]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19718, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMallocArray", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 19717, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_desc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc, 1, "desc", 0))) __PYX_ERR(0, 19718, __pyx_L1_error) __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_186cudaMallocArray(__pyx_self, __pyx_v_desc, __pyx_v_width, __pyx_v_height, __pyx_v_flags); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_186cudaMallocArray(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_desc, size_t __pyx_v_width, size_t __pyx_v_height, unsigned int __pyx_v_flags) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *__pyx_v_array = 0; struct cudaChannelFormatDesc *__pyx_v_cydesc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; struct cudaChannelFormatDesc *__pyx_t_5; int __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMallocArray", 0); /* "cuda/bindings/runtime.pyx":19783 * :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuArrayCreate` * """ * cdef cudaArray_t array = cudaArray_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_3 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19783, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_array = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":19784 * """ * cdef cudaArray_t array = cudaArray_t() * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMallocArray(array._pvt_ptr, cydesc_ptr, width, height, flags) */ __pyx_t_6 = (((PyObject *)__pyx_v_desc) != Py_None); if (__pyx_t_6) { __pyx_t_5 = __pyx_v_desc->_pvt_ptr; } else { __pyx_t_5 = NULL; } __pyx_v_cydesc_ptr = __pyx_t_5; /* "cuda/bindings/runtime.pyx":19785 * cdef cudaArray_t array = cudaArray_t() * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocArray(array._pvt_ptr, cydesc_ptr, width, height, flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19786 * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: * err = cyruntime.cudaMallocArray(array._pvt_ptr, cydesc_ptr, width, height, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMallocArray(((cudaArray_t *)__pyx_v_array->_pvt_ptr), __pyx_v_cydesc_ptr, __pyx_v_width, __pyx_v_height, __pyx_v_flags); if (unlikely(__pyx_t_7 == 
((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19786, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":19785 * cdef cudaArray_t array = cudaArray_t() * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocArray(array._pvt_ptr, cydesc_ptr, width, height, flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19787 * with nogil: * err = cyruntime.cudaMallocArray(array._pvt_ptr, cydesc_ptr, width, height, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], array) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":19788 * err = cyruntime.cudaMallocArray(array._pvt_ptr, cydesc_ptr, width, height, flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], array) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19788, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19788, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19788, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19788, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, 
__pyx_t_2) != (0)) __PYX_ERR(0, 19788, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 19788, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19787 * with nogil: * err = cyruntime.cudaMallocArray(array._pvt_ptr, cydesc_ptr, width, height, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], array) */ } /* "cuda/bindings/runtime.pyx":19789 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], array) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 19789, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_array); __Pyx_GIVEREF((PyObject *)__pyx_v_array); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_array)) != (0)) __PYX_ERR(0, 19789, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19717 * return (_dict_cudaError_t[err], devPtr, pitch) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocArray(desc : 
Optional[cudaChannelFormatDesc], size_t width, size_t height, unsigned int flags): * """ Allocate an array on the device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_array); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19791 * return (_dict_cudaError_t[err], array) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFree(devPtr): * """ Frees memory on the device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_189cudaFree(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_188cudaFree, "cudaFree(devPtr)\n\nFrees memory on the device.\n\nFrees the memory space pointed to by `devPtr`, which must have been\nreturned by a previous call to one of the following memory allocation\nAPIs - :py:obj:`~.cudaMalloc()`, :py:obj:`~.cudaMallocPitch()`,\n:py:obj:`~.cudaMallocManaged()`, :py:obj:`~.cudaMallocAsync()`,\n:py:obj:`~.cudaMallocFromPoolAsync()`.\n\nNote - This API will not perform any implicit synchronization when the\npointer was allocated with :py:obj:`~.cudaMallocAsync` or\n:py:obj:`~.cudaMallocFromPoolAsync`. Callers must ensure that all\naccesses to these pointer have completed before invoking\n:py:obj:`~.cudaFree`. For best performance and memory reuse, users\nshould use :py:obj:`~.cudaFreeAsync` to free memory allocated via the\nstream ordered memory allocator. For all other pointers, this API may\nperform implicit synchronization.\n\nIf :py:obj:`~.cudaFree`(`devPtr`) has already been called before, an\nerror is returned. 
If `devPtr` is 0, no operation is performed.\n:py:obj:`~.cudaFree()` returns :py:obj:`~.cudaErrorValue` in case of\nfailure.\n\nThe device version of :py:obj:`~.cudaFree` cannot be used with a\n`*devPtr` allocated using the host API, and vice versa.\n\nParameters\n----------\ndevPtr : Any\n Device pointer to memory to free\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaMallocManaged`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaMallocFromPoolAsync` :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaFreeAsync` :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemFree`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_189cudaFree = {"cudaFree", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_189cudaFree, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_188cudaFree}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_189cudaFree(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaFree (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { 
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19791, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19791, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaFree", 0) < (0)) __PYX_ERR(0, 19791, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaFree", 1, 1, 1, i); __PYX_ERR(0, 19791, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19791, __pyx_L3_error) } __pyx_v_devPtr = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaFree", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19791, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaFree", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_188cudaFree(__pyx_self, __pyx_v_devPtr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static 
PyObject *__pyx_pf_4cuda_8bindings_7runtime_188cudaFree(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaFree", 0); /* "cuda/bindings/runtime.pyx":19832 * :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaMallocManaged`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaMallocFromPoolAsync` :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaFreeAsync` :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemFree` * """ * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_devPtr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19832, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":19833 * """ * cydevPtr = _HelperInputVoidPtr(devPtr) * 
cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaFree(cydevPtr_ptr) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":19834 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFree(cydevPtr_ptr) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19835 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaFree(cydevPtr_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaFree(__pyx_v_cydevPtr_ptr); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19835, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":19834 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFree(cydevPtr_ptr) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19836 * with nogil: * err = cyruntime.cudaFree(cydevPtr_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); 
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 19836, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19791 * return (_dict_cudaError_t[err], array) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFree(devPtr): * """ Frees memory on the device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaFree", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19838 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFreeHost(ptr): * """ Frees page-locked memory. 
*/ /* Python wrapper */ /* NOTE(review): Cython-generated binding for cudaFreeHost(ptr) (runtime.pyx:19838). Auto-generated -- edit the .pyx source and regenerate; do not hand-edit this C. The wrapper below parses one positional/keyword argument ("ptr") and dispatches to the __pyx_pf_ implementation. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_191cudaFreeHost(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_190cudaFreeHost, "cudaFreeHost(ptr)\n\nFrees page-locked memory.\n\nFrees the memory space pointed to by `hostPtr`, which must have been\nreturned by a previous call to :py:obj:`~.cudaMallocHost()` or\n:py:obj:`~.cudaHostAlloc()`.\n\nParameters\n----------\nptr : Any\n    Pointer to memory to free\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemFreeHost`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_191cudaFreeHost = {"cudaFreeHost", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_191cudaFreeHost, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_190cudaFreeHost}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_191cudaFreeHost(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_ptr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaFreeHost (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs =
PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19838, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19838, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaFreeHost", 0) < (0)) __PYX_ERR(0, 19838, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaFreeHost", 1, 1, 1, i); __PYX_ERR(0, 19838, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19838, __pyx_L3_error) } __pyx_v_ptr = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaFreeHost", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19838, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaFreeHost", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_190cudaFreeHost(__pyx_self, __pyx_v_ptr); /* function
exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: wraps `ptr` in _HelperInputVoidPtr, extracts its `cptr` attribute as a raw void*, releases the GIL, calls cyruntime.cudaFreeHost, and returns the 1-tuple (_dict_cudaError_t[err],). Error paths funnel through __pyx_L1_error and add a Python traceback. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_190cudaFreeHost(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_ptr) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyptr = NULL; void *__pyx_v_cyptr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaFreeHost", 0); /* "cuda/bindings/runtime.pyx":19860 * :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemFreeHost` * """ * cyptr = _HelperInputVoidPtr(ptr) # <<<<<<<<<<<<<< * cdef void* cyptr_ptr = cyptr.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_ptr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19860, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyptr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* 
"cuda/bindings/runtime.pyx":19861 * """ * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaFreeHost(cyptr_ptr) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyptr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19861, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19861, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyptr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":19862 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFreeHost(cyptr_ptr) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19863 * cdef void* cyptr_ptr = cyptr.cptr * with nogil: * err = cyruntime.cudaFreeHost(cyptr_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaFreeHost(__pyx_v_cyptr_ptr); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19863, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":19862 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFreeHost(cyptr_ptr) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":19864 * with nogil: * err = cyruntime.cudaFreeHost(cyptr_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * 
@cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 19864, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19838 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFreeHost(ptr): * """ Frees page-locked memory. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaFreeHost", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cyptr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19866 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFreeArray(array): * """ Frees an array on the device. 
*/ /* Python wrapper */ /* NOTE(review): Cython-generated binding for cudaFreeArray(array) (runtime.pyx:19866). Auto-generated -- edit the .pyx source and regenerate; do not hand-edit this C. The wrapper parses one positional/keyword argument ("array") and dispatches to the __pyx_pf_ implementation. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_193cudaFreeArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_192cudaFreeArray, "cudaFreeArray(array)\n\nFrees an array on the device.\n\nFrees the CUDA array `array`, which must have been returned by a\nprevious call to :py:obj:`~.cudaMallocArray()`. If `devPtr` is 0, no\noperation is performed.\n\nParameters\n----------\narray : :py:obj:`~.cudaArray_t`\n    Pointer to array to free\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuArrayDestroy`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_193cudaFreeArray = {"cudaFreeArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_193cudaFreeArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_192cudaFreeArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_193cudaFreeArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_array = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaFreeArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = 
PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_array_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19866, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19866, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaFreeArray", 0) < (0)) __PYX_ERR(0, 19866, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaFreeArray", 1, 1, 1, i); __PYX_ERR(0, 19866, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19866, __pyx_L3_error) } __pyx_v_array = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaFreeArray", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19866, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaFreeArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_192cudaFreeArray(__pyx_self, 
__pyx_v_array); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: normalizes `array` (None -> 0, cudaArray_t -> int(array), anything else -> int(cudaArray_t(array))) into an integer handle, converts it to a raw cudaArray_t, releases the GIL, calls cyruntime.cudaFreeArray, and returns the 1-tuple (_dict_cudaError_t[err],). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_192cudaFreeArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_array) { cudaArray_t __pyx_v_cyarray; PyObject *__pyx_v_parray = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaFreeArray", 0); /* "cuda/bindings/runtime.pyx":19889 * """ * cdef cyruntime.cudaArray_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif isinstance(array, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_array == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19890 * cdef cyruntime.cudaArray_t cyarray * if array is None: * parray = 0 # <<<<<<<<<<<<<< * elif isinstance(array, (cudaArray_t,)): * parray = int(array) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_parray = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":19889 * """ * cdef cyruntime.cudaArray_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif isinstance(array, (cudaArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":19891 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_array, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19892 * parray = 0 * elif isinstance(array, (cudaArray_t,)): * parray = int(array) # <<<<<<<<<<<<<< * else: * parray = 
int(cudaArray_t(array)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19892, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_parray = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":19891 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":19894 * parray = int(array) * else: * parray = int(cudaArray_t(array)) # <<<<<<<<<<<<<< * cyarray = parray * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_array}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19894, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_parray = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":19895 * else: * parray = int(cudaArray_t(array)) * cyarray = parray # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaFreeArray(cyarray) */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_parray); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19895, __pyx_L1_error) __pyx_v_cyarray = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":19896 * parray = int(cudaArray_t(array)) * cyarray = parray * with nogil: # <<<<<<<<<<<<<< 
* err = cyruntime.cudaFreeArray(cyarray) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19897 * cyarray = parray * with nogil: * err = cyruntime.cudaFreeArray(cyarray) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaFreeArray(__pyx_v_cyarray); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19897, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":19896 * parray = int(cudaArray_t(array)) * cyarray = parray * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFreeArray(cyarray) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":19898 * with nogil: * err = cyruntime.cudaFreeArray(cyarray) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 19898, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = 
__pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19866 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFreeArray(array): * """ Frees an array on the device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaFreeArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_parray); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19900 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFreeMipmappedArray(mipmappedArray): * """ Frees a mipmapped array on the device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_195cudaFreeMipmappedArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_194cudaFreeMipmappedArray, "cudaFreeMipmappedArray(mipmappedArray)\n\nFrees a mipmapped array on the device.\n\nFrees the CUDA mipmapped array `mipmappedArray`, which must have been\nreturned by a previous call to :py:obj:`~.cudaMallocMipmappedArray()`.\nIf `devPtr` is 0, no operation is performed.\n\nParameters\n----------\nmipmappedArray : :py:obj:`~.cudaMipmappedArray_t`\n    Pointer to mipmapped array to free\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMipmappedArrayDestroy`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_195cudaFreeMipmappedArray = 
{"cudaFreeMipmappedArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_195cudaFreeMipmappedArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_194cudaFreeMipmappedArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_195cudaFreeMipmappedArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_mipmappedArray = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaFreeMipmappedArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_mipmappedArray,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19900, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19900, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaFreeMipmappedArray", 0) < (0)) __PYX_ERR(0, 19900, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaFreeMipmappedArray", 1, 1, 1, i); __PYX_ERR(0, 19900, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19900, __pyx_L3_error) } __pyx_v_mipmappedArray = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaFreeMipmappedArray", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 19900, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaFreeMipmappedArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_194cudaFreeMipmappedArray(__pyx_self, __pyx_v_mipmappedArray); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_194cudaFreeMipmappedArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mipmappedArray) { cudaMipmappedArray_t __pyx_v_cymipmappedArray; PyObject *__pyx_v_pmipmappedArray = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaFreeMipmappedArray", 0); /* "cuda/bindings/runtime.pyx":19923 * """ * cdef cyruntime.cudaMipmappedArray_t cymipmappedArray * if mipmappedArray is None: # <<<<<<<<<<<<<< * pmipmappedArray = 0 * elif isinstance(mipmappedArray, (cudaMipmappedArray_t,)): */ __pyx_t_1 = (__pyx_v_mipmappedArray == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19924 * cdef cyruntime.cudaMipmappedArray_t cymipmappedArray * if mipmappedArray is None: * pmipmappedArray = 0 # <<<<<<<<<<<<<< * elif isinstance(mipmappedArray, (cudaMipmappedArray_t,)): * pmipmappedArray = int(mipmappedArray) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmipmappedArray = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":19923 * """ * cdef cyruntime.cudaMipmappedArray_t cymipmappedArray * if mipmappedArray is None: # <<<<<<<<<<<<<< * pmipmappedArray = 0 * elif isinstance(mipmappedArray, (cudaMipmappedArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":19925 * if mipmappedArray is None: * pmipmappedArray = 0 * elif isinstance(mipmappedArray, (cudaMipmappedArray_t,)): # <<<<<<<<<<<<<< * pmipmappedArray = int(mipmappedArray) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_mipmappedArray, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":19926 * pmipmappedArray = 0 * elif 
isinstance(mipmappedArray, (cudaMipmappedArray_t,)): * pmipmappedArray = int(mipmappedArray) # <<<<<<<<<<<<<< * else: * pmipmappedArray = int(cudaMipmappedArray_t(mipmappedArray)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_mipmappedArray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pmipmappedArray = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":19925 * if mipmappedArray is None: * pmipmappedArray = 0 * elif isinstance(mipmappedArray, (cudaMipmappedArray_t,)): # <<<<<<<<<<<<<< * pmipmappedArray = int(mipmappedArray) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":19928 * pmipmappedArray = int(mipmappedArray) * else: * pmipmappedArray = int(cudaMipmappedArray_t(mipmappedArray)) # <<<<<<<<<<<<<< * cymipmappedArray = pmipmappedArray * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_mipmappedArray}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19928, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pmipmappedArray = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":19929 * else: * pmipmappedArray = int(cudaMipmappedArray_t(mipmappedArray)) * cymipmappedArray = pmipmappedArray # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaFreeMipmappedArray(cymipmappedArray) */ 
__pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmipmappedArray); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 19929, __pyx_L1_error) __pyx_v_cymipmappedArray = ((cudaMipmappedArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":19930 * pmipmappedArray = int(cudaMipmappedArray_t(mipmappedArray)) * cymipmappedArray = pmipmappedArray * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFreeMipmappedArray(cymipmappedArray) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":19931 * cymipmappedArray = pmipmappedArray * with nogil: * err = cyruntime.cudaFreeMipmappedArray(cymipmappedArray) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaFreeMipmappedArray(__pyx_v_cymipmappedArray); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19931, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":19930 * pmipmappedArray = int(cudaMipmappedArray_t(mipmappedArray)) * cymipmappedArray = pmipmappedArray * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFreeMipmappedArray(cymipmappedArray) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":19932 * with nogil: * err = cyruntime.cudaFreeMipmappedArray(cymipmappedArray) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19932, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = 
__Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19932, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19932, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19932, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 19932, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19900 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFreeMipmappedArray(mipmappedArray): * """ Frees a mipmapped array on the device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaFreeMipmappedArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmipmappedArray); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":19934 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostAlloc(size_t size, unsigned int flags): * """ Allocates page-locked memory on the host. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_197cudaHostAlloc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_196cudaHostAlloc, "cudaHostAlloc(size_t size, unsigned int flags)\n\nAllocates page-locked memory on the host.\n\nAllocates `size` bytes of host memory that is page-locked and\naccessible to the device. The driver tracks the virtual memory ranges\nallocated with this function and automatically accelerates calls to\nfunctions such as :py:obj:`~.cudaMemcpy()`. Since the memory can be\naccessed directly by the device, it can be read or written with much\nhigher bandwidth than pageable memory obtained with functions such as\n:py:obj:`~.malloc()`. Allocating excessive amounts of pinned memory may\ndegrade system performance, since it reduces the amount of memory\navailable to the system for paging. As a result, this function is best\nused sparingly to allocate staging areas for data exchange between host\nand device.\n\nThe `flags` parameter enables different options to be specified that\naffect the allocation, as follows.\n\n- :py:obj:`~.cudaHostAllocDefault`: This flag's value is defined to be\n 0 and causes :py:obj:`~.cudaHostAlloc()` to emulate\n :py:obj:`~.cudaMallocHost()`.\n\n- :py:obj:`~.cudaHostAllocPortable`: The memory returned by this call\n will be considered as pinned memory by all CUDA contexts, not just\n the one that performed the allocation.\n\n- :py:obj:`~.cudaHostAllocMapped`: Maps the allocation into the CUDA\n address space. The device pointer to the memory may be obtained by\n calling :py:obj:`~.cudaHostGetDevicePointer()`.\n\n- :py:obj:`~.cudaHostAllocWriteCombined`: Allocates the memory as\n write-combined (WC). 
WC memory can be transferred across the PCI\n Express bus more quickly on some system configurations, but cannot be\n read efficiently by most CPUs. WC memory is a good option for buffers\n that will be written by the CPU and read by the device via mapped\n pinned memory or host->device transfers.\n\nAll of these flags are orthogonal to one another: a developer may\nallocate memory that is portable, mapped and/or write-combined with no\nrestricti""ons.\n\nIn order for the :py:obj:`~.cudaHostAllocMapped` flag to have any\neffect, the CUDA context must support the :py:obj:`~.cudaDeviceMapHost`\nflag, which can be checked via :py:obj:`~.cudaGetDeviceFlags()`. The\n:py:obj:`~.cudaDeviceMapHost` flag is implicitly set for contexts\ncreated via the runtime API.\n\nThe :py:obj:`~.cudaHostAllocMapped` flag may be specified on CUDA\ncontexts for devices that do not support mapped pinned memory. The\nfailure is deferred to :py:obj:`~.cudaHostGetDevicePointer()` because\nthe memory may be mapped into other CUDA contexts via the\n:py:obj:`~.cudaHostAllocPortable` flag.\n\nMemory allocated by this function must be freed with\n:py:obj:`~.cudaFreeHost()`.\n\nParameters\n----------\nsize : size_t\n Requested allocation size in bytes\nflags : unsigned int\n Requested properties of allocated memory\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\npHost : Any\n Device pointer to allocated memory\n\nSee Also\n--------\n:py:obj:`~.cudaSetDeviceFlags`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaGetDeviceFlags`, :py:obj:`~.cuMemHostAlloc`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_197cudaHostAlloc = {"cudaHostAlloc", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_197cudaHostAlloc, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_196cudaHostAlloc}; static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_197cudaHostAlloc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { size_t __pyx_v_size; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaHostAlloc (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 19934, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19934, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19934, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaHostAlloc", 0) < (0)) __PYX_ERR(0, 19934, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaHostAlloc", 1, 2, 2, i); __PYX_ERR(0, 19934, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 19934, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 19934, __pyx_L3_error) } __pyx_v_size = __Pyx_PyLong_As_size_t(values[0]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 19935, __pyx_L3_error) __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 19935, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaHostAlloc", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 19934, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
__Pyx_AddTraceback("cuda.bindings.runtime.cudaHostAlloc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_196cudaHostAlloc(__pyx_self, __pyx_v_size, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Cython-GENERATED implementation of runtime.cudaHostAlloc(size, flags).
 * Contract (from the .pyx quoted below): returns a Python 2-tuple —
 *   (_dict_cudaError_t[err], None)  when err != cudaSuccess, or
 *   (_dict_cudaError_t[err], pHost) on success, pHost being the raw host
 *   address (void_ptr / unsigned long long) converted to a Python int.
 * This translation unit is generated by Cython: fix defects in
 * cuda/bindings/runtime.pyx and regenerate; do not edit this C by hand. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_196cudaHostAlloc(CYTHON_UNUSED PyObject *__pyx_self, size_t __pyx_v_size, unsigned int __pyx_v_flags) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_pHost; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaHostAlloc", 0); /* "cuda/bindings/runtime.pyx":20009 * :py:obj:`~.cudaSetDeviceFlags`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaGetDeviceFlags`, :py:obj:`~.cuMemHostAlloc` * """ * cdef void_ptr pHost = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaHostAlloc(&pHost, size, flags) */ __pyx_v_pHost = 0; /* "cuda/bindings/runtime.pyx":20010 * """ * cdef void_ptr pHost = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostAlloc(&pHost, size, flags) * if err != cyruntime.cudaSuccess: */
/* NOTE(review): GIL is released (Py_UNBLOCK_THREADS) around the cyruntime call
 * and unconditionally re-acquired on both the normal and error paths below. */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20011 * cdef void_ptr pHost = 0 * with nogil: * err = cyruntime.cudaHostAlloc(&pHost, size, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaHostAlloc(((void **)(&__pyx_v_pHost)), __pyx_v_size,
__pyx_v_flags); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20011, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":20010 * """ * cdef void_ptr pHost = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostAlloc(&pHost, size, flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":20012 * with nogil: * err = cyruntime.cudaHostAlloc(&pHost, size, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pHost) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":20013 * err = cyruntime.cudaHostAlloc(&pHost, size, flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pHost) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 20013, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) !=
(0)) __PYX_ERR(0, 20013, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20012 * with nogil: * err = cyruntime.cudaHostAlloc(&pHost, size, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pHost) */ } /* "cuda/bindings/runtime.pyx":20014 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pHost) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_pHost); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 20014, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 20014, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":19934 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostAlloc(size_t size, unsigned int flags): * """ Allocates page-locked memory on the host. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaHostAlloc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20016 * return (_dict_cudaError_t[err], pHost) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostRegister(ptr, size_t size, unsigned int flags): * """ Registers an existing host memory range for use by CUDA. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_199cudaHostRegister(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_198cudaHostRegister, "cudaHostRegister(ptr, size_t size, unsigned int flags)\n\nRegisters an existing host memory range for use by CUDA.\n\nPage-locks the memory range specified by `ptr` and `size` and maps it\nfor the device(s) as specified by `flags`. This memory range also is\nadded to the same tracking mechanism as :py:obj:`~.cudaHostAlloc()` to\nautomatically accelerate calls to functions such as\n:py:obj:`~.cudaMemcpy()`. Since the memory can be accessed directly by\nthe device, it can be read or written with much higher bandwidth than\npageable memory that has not been registered. Page-locking excessive\namounts of memory may degrade system performance, since it reduces the\namount of memory available to the system for paging. 
As a result, this\nfunction is best used sparingly to register staging areas for data\nexchange between host and device.\n\nOn systems where :py:obj:`~.pageableMemoryAccessUsesHostPageTables` is\ntrue, :py:obj:`~.cudaHostRegister` will not page-lock the memory range\nspecified by `ptr` but only populate unpopulated pages.\n\n:py:obj:`~.cudaHostRegister` is supported only on I/O coherent devices\nthat have a non-zero value for the device attribute\n:py:obj:`~.cudaDevAttrHostRegisterSupported`.\n\nThe `flags` parameter enables different options to be specified that\naffect the allocation, as follows.\n\n- :py:obj:`~.cudaHostRegisterDefault`: On a system with unified virtual\n addressing, the memory will be both mapped and portable. On a system\n with no unified virtual addressing, the memory will be neither mapped\n nor portable.\n\n- :py:obj:`~.cudaHostRegisterPortable`: The memory returned by this\n call will be considered as pinned memory by all CUDA contexts, not\n just the one that performed the allocation.\n\n- :py:obj:`~.cudaHostRegisterMapped`: Maps the allocation into the CUDA\n address space. The device pointer to the memory may be obtained by\n calling :py:obj:`~.cudaHostGetDevicePointer()`.\n\n- :py:obj:`~.cudaHostRegisterIoMemory`: The pass""ed memory pointer is\n treated as pointing to some memory-mapped I/O space, e.g. belonging\n to a third-party PCIe device, and it will marked as non cache-\n coherent and contiguous.\n\n- :py:obj:`~.cudaHostRegisterReadOnly`: The passed memory pointer is\n treated as pointing to memory that is considered read-only by the\n device. On platforms without\n :py:obj:`~.cudaDevAttrPageableMemoryAccessUsesHostPageTables`, this\n flag is required in order to register memory mapped to the CPU as\n read-only. Support for the use of this flag can be queried from the\n device attribute\n :py:obj:`~.cudaDevAttrHostRegisterReadOnlySupported`. 
Using this flag\n with a current context associated with a device that does not have\n this attribute set will cause :py:obj:`~.cudaHostRegister` to error\n with cudaErrorNotSupported.\n\nAll of these flags are orthogonal to one another: a developer may page-\nlock memory that is portable or mapped with no restrictions.\n\nThe CUDA context must have been created with the\n:py:obj:`~.cudaMapHost` flag in order for the\n:py:obj:`~.cudaHostRegisterMapped` flag to have any effect.\n\nThe :py:obj:`~.cudaHostRegisterMapped` flag may be specified on CUDA\ncontexts for devices that do not support mapped pinned memory. The\nfailure is deferred to :py:obj:`~.cudaHostGetDevicePointer()` because\nthe memory may be mapped into other CUDA contexts via the\n:py:obj:`~.cudaHostRegisterPortable` flag.\n\nFor devices that have a non-zero value for the device attribute\n:py:obj:`~.cudaDevAttrCanUseHostPointerForRegisteredMem`, the memory\ncan also be accessed from the device using the host pointer `ptr`. The\ndevice pointer returned by :py:obj:`~.cudaHostGetDevicePointer()` may\nor may not match the original host pointer `ptr` and depends on the\ndevices visible to the application. If all devices visible to the\napplication have a non-zero value for the device attribute, the device\npointer returned by :py:obj:`~.cudaHostGetDevicePointer()` will m""atch\nthe original pointer `ptr`. If any device visible to the application\nhas a zero value for the device attribute, the device pointer returned\nby :py:obj:`~.cudaHostGetDevicePointer()` will not match the original\nhost pointer `ptr`, but it will be suitable for use on all devices\nprovided Unified Virtual Addressing is enabled. In such systems, it is\nvalid to access the memory using either pointer on devices that have a\nnon-zero value for the device attribute. 
Note however that such devices\nshould access the memory using only of the two pointers and not both.\n\nThe memory page-locked by this function must be unregistered with\n:py:obj:`~.cudaHostUnregister()`.\n\nParameters\n----------\nptr : Any\n Host pointer to memory to page-lock\nsize : size_t\n Size in bytes of the address range to page-lock in bytes\nflags : unsigned int\n Flags for allocation request\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`, :py:obj:`~.cudaErrorHostMemoryAlreadyRegistered`, :py:obj:`~.cudaErrorNotSupported`\n\nSee Also\n--------\n:py:obj:`~.cudaHostUnregister`, :py:obj:`~.cudaHostGetFlags`, :py:obj:`~.cudaHostGetDevicePointer`, :py:obj:`~.cuMemHostRegister`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_199cudaHostRegister = {"cudaHostRegister", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_199cudaHostRegister, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_198cudaHostRegister}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_199cudaHostRegister(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_ptr = 0; size_t __pyx_v_size; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaHostRegister (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,&__pyx_mstate_global->__pyx_n_u_size_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20016, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20016, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20016, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20016, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaHostRegister", 0) < (0)) __PYX_ERR(0, 20016, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaHostRegister", 1, 3, 3, i); __PYX_ERR(0, 20016, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20016, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20016, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20016, __pyx_L3_error) } __pyx_v_ptr = values[0]; __pyx_v_size = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_size == 
(size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 20017, __pyx_L3_error) __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20017, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaHostRegister", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 20016, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaHostRegister", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_198cudaHostRegister(__pyx_self, __pyx_v_ptr, __pyx_v_size, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Cython-GENERATED implementation of runtime.cudaHostRegister(ptr, size, flags).
 * ptr is wrapped in a _HelperInputVoidPtr; its .cptr attribute (read back as an
 * unsigned integer and cast to void*) is the raw host address handed to the
 * runtime. Returns a 1-tuple (_dict_cudaError_t[err],). The GIL is released
 * around the cyruntime call. Generated code — change runtime.pyx and
 * regenerate rather than editing here. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_198cudaHostRegister(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_ptr, size_t __pyx_v_size, unsigned int __pyx_v_flags) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyptr = NULL; void *__pyx_v_cyptr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaHostRegister", 0); /* "cuda/bindings/runtime.pyx":20124 * :py:obj:`~.cudaHostUnregister`, :py:obj:`~.cudaHostGetFlags`, :py:obj:`~.cudaHostGetDevicePointer`, :py:obj:`~.cuMemHostRegister` * """ * cyptr =
_HelperInputVoidPtr(ptr) # <<<<<<<<<<<<<< * cdef void* cyptr_ptr = cyptr.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_ptr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20124, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyptr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":20125 * """ * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaHostRegister(cyptr_ptr, size, flags) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyptr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 20125, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyptr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":20126 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostRegister(cyptr_ptr, size, flags) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20127 * cdef void* cyptr_ptr = cyptr.cptr * with nogil: * err = cyruntime.cudaHostRegister(cyptr_ptr, size,
flags) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaHostRegister(__pyx_v_cyptr_ptr, __pyx_v_size, __pyx_v_flags); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20127, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":20126 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostRegister(cyptr_ptr, size, flags) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":20128 * with nogil: * err = cyruntime.cudaHostRegister(cyptr_ptr, size, flags) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20128, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 20128, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20016 * return (_dict_cudaError_t[err], pHost) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostRegister(ptr, 
size_t size, unsigned int flags): * """ Registers an existing host memory range for use by CUDA. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaHostRegister", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cyptr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20130 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostUnregister(ptr): * """ Unregisters a memory range that was registered with cudaHostRegister. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_201cudaHostUnregister(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_200cudaHostUnregister, "cudaHostUnregister(ptr)\n\nUnregisters a memory range that was registered with cudaHostRegister.\n\nUnmaps the memory range whose base address is specified by `ptr`, and\nmakes it pageable again.\n\nThe base address must be the same one specified to\n:py:obj:`~.cudaHostRegister()`.\n\nParameters\n----------\nptr : Any\n Host pointer to memory to unregister\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorHostMemoryNotRegistered`\n\nSee Also\n--------\n:py:obj:`~.cudaHostUnregister`, :py:obj:`~.cuMemHostUnregister`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_201cudaHostUnregister = {"cudaHostUnregister", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_201cudaHostUnregister, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_200cudaHostUnregister}; static PyObject 
*__pyx_pw_4cuda_8bindings_7runtime_201cudaHostUnregister(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_ptr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaHostUnregister (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20130, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20130, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaHostUnregister", 0) < (0)) __PYX_ERR(0, 20130, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaHostUnregister", 1, 1, 1, i); __PYX_ERR(0, 20130, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20130, __pyx_L3_error) } __pyx_v_ptr = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaHostUnregister", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20130, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaHostUnregister", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_200cudaHostUnregister(__pyx_self, __pyx_v_ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_200cudaHostUnregister(CYTHON_UNUSED PyObject 
*__pyx_self, PyObject *__pyx_v_ptr) {
/* NOTE(review): Cython-GENERATED implementation of runtime.cudaHostUnregister(ptr).
 * Same shape as cudaHostRegister above: ptr -> _HelperInputVoidPtr, its .cptr
 * is read back as an unsigned integer and cast to void*, the GIL is dropped
 * around the cyruntime call, and a 1-tuple (_dict_cudaError_t[err],) is
 * returned. Generated code — edit runtime.pyx and regenerate, not this file. */
struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyptr = NULL; void *__pyx_v_cyptr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaHostUnregister", 0); /* "cuda/bindings/runtime.pyx":20154 * :py:obj:`~.cudaHostUnregister`, :py:obj:`~.cuMemHostUnregister` * """ * cyptr = _HelperInputVoidPtr(ptr) # <<<<<<<<<<<<<< * cdef void* cyptr_ptr = cyptr.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_ptr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20154, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyptr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":20155 * """ * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaHostUnregister(cyptr_ptr) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyptr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20155, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) &&
PyErr_Occurred())) __PYX_ERR(0, 20155, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyptr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":20156 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostUnregister(cyptr_ptr) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20157 * cdef void* cyptr_ptr = cyptr.cptr * with nogil: * err = cyruntime.cudaHostUnregister(cyptr_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaHostUnregister(__pyx_v_cyptr_ptr); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20157, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":20156 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostUnregister(cyptr_ptr) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":20158 * with nogil: * err = cyruntime.cudaHostUnregister(cyptr_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20158, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 20158, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20130 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostUnregister(ptr): * """ Unregisters a memory range that was registered with cudaHostRegister. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaHostUnregister", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cyptr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20160 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostGetDevicePointer(pHost, unsigned int flags): * """ Passes back device pointer of mapped host memory allocated by cudaHostAlloc or registered by cudaHostRegister. 
*/ /* Python wrapper */ /* NOTE(review): Cython-generated METH_FASTCALL entry point for cudaHostGetDevicePointer(pHost, flags): unpacks positional/keyword arguments into values[], converts flags with __Pyx_PyLong_As_unsigned_int, then dispatches to the _202 implementation below. Do not hand-edit; regenerate from runtime.pyx. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_203cudaHostGetDevicePointer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_202cudaHostGetDevicePointer, "cudaHostGetDevicePointer(pHost, unsigned int flags)\n\nPasses back device pointer of mapped host memory allocated by cudaHostAlloc or registered by cudaHostRegister.\n\nPasses back the device pointer corresponding to the mapped, pinned host\nbuffer allocated by :py:obj:`~.cudaHostAlloc()` or registered by\n:py:obj:`~.cudaHostRegister()`.\n\n:py:obj:`~.cudaHostGetDevicePointer()` will fail if the\n:py:obj:`~.cudaDeviceMapHost` flag was not specified before deferred\ncontext creation occurred, or if called on a device that does not\nsupport mapped, pinned memory.\n\nFor devices that have a non-zero value for the device attribute\n:py:obj:`~.cudaDevAttrCanUseHostPointerForRegisteredMem`, the memory\ncan also be accessed from the device using the host pointer `pHost`.\nThe device pointer returned by :py:obj:`~.cudaHostGetDevicePointer()`\nmay or may not match the original host pointer `pHost` and depends on\nthe devices visible to the application. If all devices visible to the\napplication have a non-zero value for the device attribute, the device\npointer returned by :py:obj:`~.cudaHostGetDevicePointer()` will match\nthe original pointer `pHost`. If any device visible to the application\nhas a zero value for the device attribute, the device pointer returned\nby :py:obj:`~.cudaHostGetDevicePointer()` will not match the original\nhost pointer `pHost`, but it will be suitable for use on all devices\nprovided Unified Virtual Addressing is enabled. In such systems, it is\nvalid to access the memory using either pointer on devices that have a\nnon-zero value for the device attribute. 
Note however that such devices\nshould access the memory using only of the two pointers and not both.\n\n`flags` provides for future releases. For now, it must be set to 0.\n\nParameters\n----------\npHost : Any\n Requested host pointer mapping\nflags : unsigned int\n Flags for extensions (must be 0 for now)\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErro""rInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\npDevice : Any\n Returned device pointer for mapped memory\n\nSee Also\n--------\n:py:obj:`~.cudaSetDeviceFlags`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_203cudaHostGetDevicePointer = {"cudaHostGetDevicePointer", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_203cudaHostGetDevicePointer, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_202cudaHostGetDevicePointer}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_203cudaHostGetDevicePointer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_pHost = 0; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaHostGetDevicePointer (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = 
{&__pyx_mstate_global->__pyx_n_u_pHost,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20160, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20160, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20160, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaHostGetDevicePointer", 0) < (0)) __PYX_ERR(0, 20160, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaHostGetDevicePointer", 1, 2, 2, i); __PYX_ERR(0, 20160, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20160, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20160, __pyx_L3_error) } __pyx_v_pHost = values[0]; __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20161, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaHostGetDevicePointer", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20160, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { 
Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaHostGetDevicePointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_202cudaHostGetDevicePointer(__pyx_self, __pyx_v_pHost, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* NOTE(review): implementation: boxes pHost in _HelperInputVoidPtr, reads its .cptr as an integer address, calls cyruntime.cudaHostGetDevicePointer(&pDevice, ...) with the GIL released; returns (_dict_cudaError_t[err], None) on failure, otherwise (_dict_cudaError_t[err], pDevice) with pDevice boxed as an integer address. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_202cudaHostGetDevicePointer(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_pHost, unsigned int __pyx_v_flags) { __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_pDevice; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cypHost = NULL; void *__pyx_v_cypHost_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaHostGetDevicePointer", 0); /* "cuda/bindings/runtime.pyx":20210 * :py:obj:`~.cudaSetDeviceFlags`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemHostGetDevicePointer` * """ * cdef void_ptr pDevice = 0 # <<<<<<<<<<<<<< * cypHost = _HelperInputVoidPtr(pHost) * cdef void* cypHost_ptr = cypHost.cptr */ __pyx_v_pDevice = 0; /* "cuda/bindings/runtime.pyx":20211 * """ * cdef void_ptr pDevice = 0 * cypHost = _HelperInputVoidPtr(pHost) # <<<<<<<<<<<<<< * cdef void* cypHost_ptr = cypHost.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pHost}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20211, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cypHost = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":20212 * cdef void_ptr pDevice = 0 * cypHost = _HelperInputVoidPtr(pHost) * cdef void* cypHost_ptr = cypHost.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaHostGetDevicePointer(&pDevice, cypHost_ptr, flags) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cypHost), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20212, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 20212, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cypHost_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":20213 * cypHost = _HelperInputVoidPtr(pHost) * cdef void* cypHost_ptr = cypHost.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostGetDevicePointer(&pDevice, cypHost_ptr, flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20214 * cdef void* cypHost_ptr = cypHost.cptr * with nogil: * err = cyruntime.cudaHostGetDevicePointer(&pDevice, cypHost_ptr, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_6 = 
__pyx_f_4cuda_8bindings_9cyruntime_cudaHostGetDevicePointer(((void **)(&__pyx_v_pDevice)), __pyx_v_cypHost_ptr, __pyx_v_flags); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20214, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":20213 * cypHost = _HelperInputVoidPtr(pHost) * cdef void* cypHost_ptr = cypHost.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostGetDevicePointer(&pDevice, cypHost_ptr, flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":20215 * with nogil: * err = cyruntime.cudaHostGetDevicePointer(&pDevice, cypHost_ptr, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pDevice) */ __pyx_t_7 = (__pyx_v_err != cudaSuccess); if (__pyx_t_7) { /* "cuda/bindings/runtime.pyx":20216 * err = cyruntime.cudaHostGetDevicePointer(&pDevice, cypHost_ptr, flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pDevice) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20216, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20216, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20216, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20216, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 20216, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 20216, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20215 * with nogil: * err = cyruntime.cudaHostGetDevicePointer(&pDevice, cypHost_ptr, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pDevice) */ } /* "cuda/bindings/runtime.pyx":20217 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pDevice) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_pDevice); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20217, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 20217, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2) != (0)) __PYX_ERR(0, 20217, __pyx_L1_error); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* 
"cuda/bindings/runtime.pyx":20160 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostGetDevicePointer(pHost, unsigned int flags): * """ Passes back device pointer of mapped host memory allocated by cudaHostAlloc or registered by cudaHostRegister. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaHostGetDevicePointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cypHost); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20219 * return (_dict_cudaError_t[err], pDevice) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostGetFlags(pHost): * """ Passes back flags used to allocate pinned host memory allocated by cudaHostAlloc. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_205cudaHostGetFlags(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_204cudaHostGetFlags, "cudaHostGetFlags(pHost)\n\nPasses back flags used to allocate pinned host memory allocated by cudaHostAlloc.\n\n:py:obj:`~.cudaHostGetFlags()` will fail if the input pointer does not\nreside in an address range allocated by :py:obj:`~.cudaHostAlloc()`.\n\nParameters\n----------\npHost : Any\n Host pointer\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npFlags : unsigned int\n Returned flags word\n\nSee Also\n--------\n:py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemHostGetFlags`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_205cudaHostGetFlags = {"cudaHostGetFlags", 
(PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_205cudaHostGetFlags, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_204cudaHostGetFlags}; /* NOTE(review): Cython-generated METH_FASTCALL entry point for cudaHostGetFlags(pHost); unpacks the single argument and dispatches to the _204 implementation below. Do not hand-edit; regenerate from runtime.pyx. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_205cudaHostGetFlags(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_pHost = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaHostGetFlags (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pHost,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20219, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20219, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaHostGetFlags", 0) < (0)) __PYX_ERR(0, 20219, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaHostGetFlags", 1, 1, 1, i); __PYX_ERR(0, 20219, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20219, __pyx_L3_error) } __pyx_v_pHost = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaHostGetFlags", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20219, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaHostGetFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_204cudaHostGetFlags(__pyx_self, __pyx_v_pHost); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* NOTE(review): implementation: boxes pHost in _HelperInputVoidPtr, reads its .cptr as an integer address, calls cyruntime.cudaHostGetFlags(&pFlags, ...) with the GIL released; returns (_dict_cudaError_t[err], None) on failure, otherwise (_dict_cudaError_t[err], pFlags). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_204cudaHostGetFlags(CYTHON_UNUSED PyObject 
*__pyx_self, PyObject *__pyx_v_pHost) { unsigned int __pyx_v_pFlags; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cypHost = NULL; void *__pyx_v_cypHost_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaHostGetFlags", 0); /* "cuda/bindings/runtime.pyx":20242 * :py:obj:`~.cudaHostAlloc`, :py:obj:`~.cuMemHostGetFlags` * """ * cdef unsigned int pFlags = 0 # <<<<<<<<<<<<<< * cypHost = _HelperInputVoidPtr(pHost) * cdef void* cypHost_ptr = cypHost.cptr */ __pyx_v_pFlags = 0; /* "cuda/bindings/runtime.pyx":20243 * """ * cdef unsigned int pFlags = 0 * cypHost = _HelperInputVoidPtr(pHost) # <<<<<<<<<<<<<< * cdef void* cypHost_ptr = cypHost.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_pHost}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20243, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cypHost = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":20244 * cdef unsigned int pFlags = 0 * cypHost = _HelperInputVoidPtr(pHost) * cdef void* cypHost_ptr = cypHost.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaHostGetFlags(&pFlags, cypHost_ptr) */ 
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cypHost), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 20244, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cypHost_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":20245 * cypHost = _HelperInputVoidPtr(pHost) * cdef void* cypHost_ptr = cypHost.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostGetFlags(&pFlags, cypHost_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20246 * cdef void* cypHost_ptr = cypHost.cptr * with nogil: * err = cyruntime.cudaHostGetFlags(&pFlags, cypHost_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaHostGetFlags((&__pyx_v_pFlags), __pyx_v_cypHost_ptr); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20246, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":20245 * cypHost = _HelperInputVoidPtr(pHost) * cdef void* cypHost_ptr = cypHost.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaHostGetFlags(&pFlags, cypHost_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":20247 * with nogil: * err = cyruntime.cudaHostGetFlags(&pFlags, cypHost_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return 
(_dict_cudaError_t[err], pFlags) */ __pyx_t_7 = (__pyx_v_err != cudaSuccess); if (__pyx_t_7) { /* "cuda/bindings/runtime.pyx":20248 * err = cyruntime.cudaHostGetFlags(&pFlags, cypHost_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pFlags) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20248, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20248, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20248, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20248, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 20248, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 20248, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20247 * with nogil: * err = cyruntime.cudaHostGetFlags(&pFlags, cypHost_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pFlags) */ } /* "cuda/bindings/runtime.pyx":20249 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pFlags) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyLong_From_unsigned_int(__pyx_v_pFlags); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 20249, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2) != (0)) __PYX_ERR(0, 20249, __pyx_L1_error); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20219 * return (_dict_cudaError_t[err], pDevice) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaHostGetFlags(pHost): * """ Passes back flags used to allocate pinned host memory allocated by cudaHostAlloc. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaHostGetFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cypHost); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20251 * return (_dict_cudaError_t[err], pFlags) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMalloc3D(extent not None : cudaExtent): * """ Allocates logical 1D, 2D, or 3D memory objects on the device. 
*/ /* NOTE(review): This file is machine-generated by Cython from cuda/bindings/runtime.pyx. Do not hand-edit; change the .pyx source and regenerate. The wrappers below unpack Python args into C values, the pf_ implementations call the cyruntime C API with the GIL released and return an (error, result) tuple. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_207cudaMalloc3D(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_206cudaMalloc3D, "cudaMalloc3D(cudaExtent extent: cudaExtent)\n\nAllocates logical 1D, 2D, or 3D memory objects on the device.\n\nAllocates at least `width` * `height` * `depth` bytes of linear memory\non the device and returns a :py:obj:`~.cudaPitchedPtr` in which `ptr`\nis a pointer to the allocated memory. The function may pad the\nallocation to ensure hardware alignment requirements are met. The pitch\nreturned in the `pitch` field of `pitchedDevPtr` is the width in bytes\nof the allocation.\n\nThe returned :py:obj:`~.cudaPitchedPtr` contains additional fields\n`xsize` and `ysize`, the logical width and height of the allocation,\nwhich are equivalent to the `width` and `height` `extent` parameters\nprovided by the programmer during allocation.\n\nFor allocations of 2D and 3D objects, it is highly recommended that\nprogrammers perform allocations using :py:obj:`~.cudaMalloc3D()` or\n:py:obj:`~.cudaMallocPitch()`. 
Due to alignment restrictions in the\nhardware, this is especially true if the application will be performing\nmemory copies involving 2D or 3D objects (whether linear memory or CUDA\narrays).\n\nParameters\n----------\nextent : :py:obj:`~.cudaExtent`\n    Requested allocation size (`width` field in bytes)\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\npitchedDevPtr : :py:obj:`~.cudaPitchedPtr`\n    Pointer to allocated pitched device memory\n\nSee Also\n--------\n:py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaPitchedPtr`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuMemAllocPitch`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_207cudaMalloc3D = {"cudaMalloc3D", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_207cudaMalloc3D, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_206cudaMalloc3D}; /* Argument-unpacking wrapper for cudaMalloc3D: accepts exactly one positional-or-keyword arg ("extent"), type-checks it against the cudaExtent extension type, then delegates to the pf_ implementation. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_207cudaMalloc3D(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMalloc3D (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = 
PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_extent_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20251, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20251, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMalloc3D", 0) < (0)) __PYX_ERR(0, 20251, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMalloc3D", 1, 1, 1, i); __PYX_ERR(0, 20251, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20251, __pyx_L3_error) } __pyx_v_extent = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *)values[0]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMalloc3D", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20251, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMalloc3D", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_extent), 
__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExtent, 0, "extent", 0))) __PYX_ERR(0, 20252, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_206cudaMalloc3D(__pyx_self, __pyx_v_extent); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation of cudaMalloc3D: constructs a fresh cudaPitchedPtr wrapper object, releases the GIL, calls the underlying cyruntime.cudaMalloc3D, then returns a 2-tuple (_dict_cudaError_t[err], pitchedDevPtr-or-None). On any CUDA error the second element is None. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_206cudaMalloc3D(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaPitchedPtr *__pyx_v_pitchedDevPtr = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMalloc3D", 0); /* "cuda/bindings/runtime.pyx":20290
 * :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaMallocArray`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaPitchedPtr`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuMemAllocPitch` * """ * cdef cudaPitchedPtr pitchedDevPtr = cudaPitchedPtr() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMalloc3D(pitchedDevPtr._pvt_ptr, extent._pvt_ptr[0]) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaPitchedPtr); __pyx_t_3 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaPitchedPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20290, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pitchedDevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaPitchedPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":20291
 * """ * cdef cudaPitchedPtr pitchedDevPtr = cudaPitchedPtr() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMalloc3D(pitchedDevPtr._pvt_ptr, extent._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20292
 * cdef cudaPitchedPtr pitchedDevPtr = cudaPitchedPtr() * with nogil: * err = cyruntime.cudaMalloc3D(pitchedDevPtr._pvt_ptr, extent._pvt_ptr[0]) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMalloc3D(((struct cudaPitchedPtr *)__pyx_v_pitchedDevPtr->_pvt_ptr), (__pyx_v_extent->_pvt_ptr[0])); if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20292, __pyx_L4_error) __pyx_v_err = __pyx_t_5; } /* "cuda/bindings/runtime.pyx":20291
 * """ * cdef cudaPitchedPtr pitchedDevPtr = cudaPitchedPtr() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMalloc3D(pitchedDevPtr._pvt_ptr, extent._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":20293
 * with nogil: * err = 
cyruntime.cudaMalloc3D(pitchedDevPtr._pvt_ptr, extent._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pitchedDevPtr) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":20294
 * err = cyruntime.cudaMalloc3D(pitchedDevPtr._pvt_ptr, extent._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pitchedDevPtr) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 20294, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 20294, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20293
 * with nogil: * err = cyruntime.cudaMalloc3D(pitchedDevPtr._pvt_ptr, extent._pvt_ptr[0]) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pitchedDevPtr) */ } /* "cuda/bindings/runtime.pyx":20295
 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pitchedDevPtr) # <<<<<<<<<<<<<< * * 
@cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20295, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 20295, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pitchedDevPtr); __Pyx_GIVEREF((PyObject *)__pyx_v_pitchedDevPtr); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pitchedDevPtr)) != (0)) __PYX_ERR(0, 20295, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20251
 * return (_dict_cudaError_t[err], pFlags) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMalloc3D(extent not None : cudaExtent): * """ Allocates logical 1D, 2D, or 3D memory objects on the device. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMalloc3D", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pitchedDevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20297
 * return (_dict_cudaError_t[err], pitchedDevPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMalloc3DArray(desc : Optional[cudaChannelFormatDesc], extent not None : cudaExtent, unsigned int flags): * """ Allocate an array on the device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_209cudaMalloc3DArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_208cudaMalloc3DArray, "cudaMalloc3DArray(cudaChannelFormatDesc desc: Optional[cudaChannelFormatDesc], cudaExtent extent: cudaExtent, unsigned int flags)\n\nAllocate an array on the device.\n\nAllocates a CUDA array according to the\n:py:obj:`~.cudaChannelFormatDesc` structure `desc` and returns a handle\nto the new CUDA array in `*array`.\n\nThe :py:obj:`~.cudaChannelFormatDesc` is defined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere :py:obj:`~.cudaChannelFormatKind` is one of\n:py:obj:`~.cudaChannelFormatKindSigned`,\n:py:obj:`~.cudaChannelFormatKindUnsigned`, or\n:py:obj:`~.cudaChannelFormatKindFloat`.\n\n:py:obj:`~.cudaMalloc3DArray()` can allocate the following:\n\n- A 1D array is allocated if the height and depth extents are both\n  zero.\n\n- A 2D array is allocated if only the depth extent is zero.\n\n- A 3D array is allocated if all three extents are non-zero.\n\n- A 1D layered CUDA array is allocated if only the height extent is\n  zero and the 
cudaArrayLayered flag is set. Each layer is a 1D array.\n  The number of layers is determined by the depth extent.\n\n- A 2D layered CUDA array is allocated if all three extents are non-\n  zero and the cudaArrayLayered flag is set. Each layer is a 2D array.\n  The number of layers is determined by the depth extent.\n\n- A cubemap CUDA array is allocated if all three extents are non-zero\n  and the cudaArrayCubemap flag is set. Width must be equal to height,\n  and depth must be six. A cubemap is a special type of 2D layered CUDA\n  array, where the six layers represent the six faces of a cube. The\n  order of the six layers in memory is the same as that listed in\n  :py:obj:`~.cudaGraphicsCubeFace`.\n\n- A cubemap layered CUDA array is allocated if all three extents are\n  non-zero, and both, cudaArrayCubemap and cudaArrayLayered flags are\n  set. Width must be equal to height, and depth must be a multiple of\n  six. A cubemap layered CUDA array is a special type of 2D layered\n  CUDA array that consists of a"" collection of cubemaps. The first six\n  layers represent the first cubemap, the next six layers form the\n  second cubemap, and so on.\n\nThe `flags` parameter enables different options to be specified that\naffect the allocation, as follows.\n\n- :py:obj:`~.cudaArrayDefault`: This flag's value is defined to be 0\n  and provides default array allocation\n\n- :py:obj:`~.cudaArrayLayered`: Allocates a layered CUDA array, with\n  the depth extent indicating the number of layers\n\n- :py:obj:`~.cudaArrayCubemap`: Allocates a cubemap CUDA array. Width\n  must be equal to height, and depth must be six. If the\n  cudaArrayLayered flag is also set, depth must be a multiple of six.\n\n- :py:obj:`~.cudaArraySurfaceLoadStore`: Allocates a CUDA array that\n  could be read from or written to using a surface reference.\n\n- :py:obj:`~.cudaArrayTextureGather`: This flag indicates that texture\n  gather operations will be performed on the CUDA array. 
Texture gather\n  can only be performed on 2D CUDA arrays.\n\n- :py:obj:`~.cudaArraySparse`: Allocates a CUDA array without physical\n  backing memory. The subregions within this sparse array can later be\n  mapped onto a physical memory allocation by calling\n  :py:obj:`~.cuMemMapArrayAsync`. This flag can only be used for\n  creating 2D, 3D or 2D layered sparse CUDA arrays. The physical\n  backing memory must be allocated via :py:obj:`~.cuMemCreate`.\n\n- :py:obj:`~.cudaArrayDeferredMapping`: Allocates a CUDA array without\n  physical backing memory. The entire array can later be mapped onto a\n  physical memory allocation by calling :py:obj:`~.cuMemMapArrayAsync`.\n  The physical backing memory must be allocated via\n  :py:obj:`~.cuMemCreate`.\n\nThe width, height and depth extents must meet certain size requirements\nas listed in the following table. All values are specified in elements.\n\nNote that 2D CUDA arrays have different size requirements if the\n:py:obj:`~.cudaArrayTextureGather` flag is set. 
In that case, the valid""\nrange for (width, height, depth) is ((1,maxTexture2DGather[0]),\n(1,maxTexture2DGather[1]), 0).\n\n**View CUDA Toolkit Documentation for a table example**\n\nParameters\n----------\ndesc : :py:obj:`~.cudaChannelFormatDesc`\n    Requested channel format\nextent : :py:obj:`~.cudaExtent`\n    Requested allocation size (`width` field in elements)\nflags : unsigned int\n    Flags for extensions\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\narray : :py:obj:`~.cudaArray_t`\n    Pointer to allocated array in device memory\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuArray3DCreate`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_209cudaMalloc3DArray = {"cudaMalloc3DArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_209cudaMalloc3DArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_208cudaMalloc3DArray}; /* Argument-unpacking wrapper for cudaMalloc3DArray: accepts exactly three args (desc, extent, flags); desc may be None (Optional), extent must be a cudaExtent, flags is converted to unsigned int. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_209cudaMalloc3DArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_desc = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent = 0; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("cudaMalloc3DArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_desc_2,&__pyx_mstate_global->__pyx_n_u_extent_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20297, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20297, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20297, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20297, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMalloc3DArray", 0) < (0)) __PYX_ERR(0, 20297, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMalloc3DArray", 1, 3, 3, i); __PYX_ERR(0, 20297, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20297, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) 
__PYX_ERR(0, 20297, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20297, __pyx_L3_error) } __pyx_v_desc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *)values[0]); __pyx_v_extent = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *)values[1]); __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20298, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMalloc3DArray", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 20297, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMalloc3DArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_desc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc, 1, "desc", 0))) __PYX_ERR(0, 20298, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_extent), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExtent, 0, "extent", 0))) __PYX_ERR(0, 20298, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_208cudaMalloc3DArray(__pyx_self, __pyx_v_desc, __pyx_v_extent, __pyx_v_flags); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation of cudaMalloc3DArray: constructs a fresh cudaArray_t wrapper, passes desc's underlying pointer (or NULL when desc is None) to cyruntime.cudaMalloc3DArray with the GIL released, and returns (_dict_cudaError_t[err], array-or-None). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_208cudaMalloc3DArray(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_desc, struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent, unsigned int __pyx_v_flags) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *__pyx_v_array = 0; struct cudaChannelFormatDesc *__pyx_v_cydesc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; struct cudaChannelFormatDesc *__pyx_t_5; int __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMalloc3DArray", 0); /* "cuda/bindings/runtime.pyx":20409
 * :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuArray3DCreate` * """ * cdef cudaArray_t array = cudaArray_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20409, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_array = ((struct 
__pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":20410
 * """ * cdef cudaArray_t array = cudaArray_t() * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMalloc3DArray(array._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], flags) */ __pyx_t_6 = (((PyObject *)__pyx_v_desc) != Py_None); if (__pyx_t_6) { __pyx_t_5 = __pyx_v_desc->_pvt_ptr; } else { __pyx_t_5 = NULL; } __pyx_v_cydesc_ptr = __pyx_t_5; /* "cuda/bindings/runtime.pyx":20411
 * cdef cudaArray_t array = cudaArray_t() * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMalloc3DArray(array._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20412
 * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: * err = cyruntime.cudaMalloc3DArray(array._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMalloc3DArray(((cudaArray_t *)__pyx_v_array->_pvt_ptr), __pyx_v_cydesc_ptr, (__pyx_v_extent->_pvt_ptr[0]), __pyx_v_flags); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20412, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":20411
 * cdef cudaArray_t array = cudaArray_t() * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMalloc3DArray(array._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: */ 
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":20413
 * with nogil: * err = cyruntime.cudaMalloc3DArray(array._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], array) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":20414
 * err = cyruntime.cudaMalloc3DArray(array._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], array) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 20414, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 20414, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20413
 * with nogil: * err = cyruntime.cudaMalloc3DArray(array._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], flags) * if err != cyruntime.cudaSuccess: # 
<<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], array) */ } /* "cuda/bindings/runtime.pyx":20415
 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], array) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20415, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 20415, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_array); __Pyx_GIVEREF((PyObject *)__pyx_v_array); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_array)) != (0)) __PYX_ERR(0, 20415, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20297
 * return (_dict_cudaError_t[err], pitchedDevPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMalloc3DArray(desc : Optional[cudaChannelFormatDesc], extent not None : cudaExtent, unsigned int flags): * """ Allocate an array on the device. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMalloc3DArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_array); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20417
 * return (_dict_cudaError_t[err], array) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocMipmappedArray(desc : Optional[cudaChannelFormatDesc], extent not None : cudaExtent, unsigned int numLevels, unsigned int flags): * """ Allocate a mipmapped array on the device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_211cudaMallocMipmappedArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_210cudaMallocMipmappedArray, "cudaMallocMipmappedArray(cudaChannelFormatDesc desc: Optional[cudaChannelFormatDesc], cudaExtent extent: cudaExtent, unsigned int numLevels, unsigned int flags)\n\nAllocate a mipmapped array on the device.\n\nAllocates a CUDA mipmapped array according to the\n:py:obj:`~.cudaChannelFormatDesc` structure `desc` and returns a handle\nto the new CUDA mipmapped array in `*mipmappedArray`. `numLevels`\nspecifies the number of mipmap levels to be allocated. 
This value is\nclamped to the range [1, 1 + floor(log2(max(width, height, depth)))].\n\nThe :py:obj:`~.cudaChannelFormatDesc` is defined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere :py:obj:`~.cudaChannelFormatKind` is one of\n:py:obj:`~.cudaChannelFormatKindSigned`,\n:py:obj:`~.cudaChannelFormatKindUnsigned`, or\n:py:obj:`~.cudaChannelFormatKindFloat`.\n\n:py:obj:`~.cudaMallocMipmappedArray()` can allocate the following:\n\n- A 1D mipmapped array is allocated if the height and depth extents are\n both zero.\n\n- A 2D mipmapped array is allocated if only the depth extent is zero.\n\n- A 3D mipmapped array is allocated if all three extents are non-zero.\n\n- A 1D layered CUDA mipmapped array is allocated if only the height\n extent is zero and the cudaArrayLayered flag is set. Each layer is a\n 1D mipmapped array. The number of layers is determined by the depth\n extent.\n\n- A 2D layered CUDA mipmapped array is allocated if all three extents\n are non-zero and the cudaArrayLayered flag is set. Each layer is a 2D\n mipmapped array. The number of layers is determined by the depth\n extent.\n\n- A cubemap CUDA mipmapped array is allocated if all three extents are\n non-zero and the cudaArrayCubemap flag is set. Width must be equal to\n height, and depth must be six. The order of the six layers in memory\n is the same as that listed in :py:obj:`~.cudaGraphicsCubeFace`.\n\n- A cubemap layered CUDA mipmapped array is allocated if all three\n extents are non-zero, and both, cudaArrayCub""emap and cudaArrayLayered\n flags are set. Width must be equal to height, and depth must be a\n multiple of six. A cubemap layered CUDA mipmapped array is a special\n type of 2D layered CUDA mipmapped array that consists of a collection\n of cubemap mipmapped arrays. 
The first six layers represent the first\n cubemap mipmapped array, the next six layers form the second cubemap\n mipmapped array, and so on.\n\nThe `flags` parameter enables different options to be specified that\naffect the allocation, as follows.\n\n- :py:obj:`~.cudaArrayDefault`: This flag's value is defined to be 0\n and provides default mipmapped array allocation\n\n- :py:obj:`~.cudaArrayLayered`: Allocates a layered CUDA mipmapped\n array, with the depth extent indicating the number of layers\n\n- :py:obj:`~.cudaArrayCubemap`: Allocates a cubemap CUDA mipmapped\n array. Width must be equal to height, and depth must be six. If the\n cudaArrayLayered flag is also set, depth must be a multiple of six.\n\n- :py:obj:`~.cudaArraySurfaceLoadStore`: This flag indicates that\n individual mipmap levels of the CUDA mipmapped array will be read\n from or written to using a surface reference.\n\n- :py:obj:`~.cudaArrayTextureGather`: This flag indicates that texture\n gather operations will be performed on the CUDA array. Texture gather\n can only be performed on 2D CUDA mipmapped arrays, and the gather\n operations are performed only on the most detailed mipmap level.\n\n- :py:obj:`~.cudaArraySparse`: Allocates a CUDA mipmapped array without\n physical backing memory. The subregions within this sparse array can\n later be mapped onto a physical memory allocation by calling\n :py:obj:`~.cuMemMapArrayAsync`. This flag can only be used for\n creating 2D, 3D or 2D layered sparse CUDA mipmapped arrays. The\n physical backing memory must be allocated via\n :py:obj:`~.cuMemCreate`.\n\n- :py:obj:`~.cudaArrayDeferredMapping`: Allocates a CUDA mipmapped\n array without physical backing memory. The"" entire array can later be\n mapped onto a physical memory allocation by calling\n :py:obj:`~.cuMemMapArrayAsync`. 
The physical backing memory must be\n allocated via :py:obj:`~.cuMemCreate`.\n\nThe width, height and depth extents must meet certain size requirements\nas listed in the following table. All values are specified in elements.\n\n**View CUDA Toolkit Documentation for a table example**\n\nParameters\n----------\ndesc : :py:obj:`~.cudaChannelFormatDesc`\n Requested channel format\nextent : :py:obj:`~.cudaExtent`\n Requested allocation size (`width` field in elements)\nnumLevels : unsigned int\n Number of mipmap levels to allocate\nflags : unsigned int\n Flags for extensions\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\nmipmappedArray : :py:obj:`~.cudaMipmappedArray_t`\n Pointer to allocated mipmapped array in device memory\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuMipmappedArrayCreate`");
/* NOTE(review): this file is Cython-generated from cuda/bindings/runtime.pyx;
   prefer fixing the generator/.pyx and regenerating over further hand edits.
   Method table entry binding Python-level cudaMallocMipmappedArray to the
   fastcall wrapper below. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_211cudaMallocMipmappedArray = {"cudaMallocMipmappedArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_211cudaMallocMipmappedArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_210cudaMallocMipmappedArray};
/* Argument-unpacking wrapper: parses (desc, extent, numLevels, flags) from
   positional/keyword arguments, converts numLevels/flags to unsigned int,
   type-checks desc (optional) and extent (required) and delegates to the
   implementation function __pyx_pf_..._210cudaMallocMipmappedArray. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_211cudaMallocMipmappedArray(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_desc = 0;
struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent = 0;
unsigned int __pyx_v_numLevels;
unsigned int __pyx_v_flags;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[4] = {0,0,0,0};
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cudaMallocMipmappedArray (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
{
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_desc_2,&__pyx_mstate_global->__pyx_n_u_extent_2,&__pyx_mstate_global->__pyx_n_u_numLevels_2,&__pyx_mstate_global->__pyx_n_u_flags_2,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
/* FIX(review): was "unlikely(__pyx_kwds_len) < 0" — unlikely(x) expands to
   __builtin_expect(!!(x), 0), which yields only 0 or 1, so the comparison
   could never be true and a negative (error) result from
   __Pyx_NumKwargs_FASTCALL was silently ignored. */
if (unlikely(__pyx_kwds_len < 0)) __PYX_ERR(0, 20417, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
switch (__pyx_nargs) {
case  4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 20417, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case  3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20417, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case  2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20417, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case  1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20417, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case  0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMallocMipmappedArray", 0) < (0)) __PYX_ERR(0, 20417, __pyx_L3_error)
for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMallocMipmappedArray", 1, 4, 4, i); __PYX_ERR(0, 20417, __pyx_L3_error) } }
} else if (unlikely(__pyx_nargs != 4)) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20417, __pyx_L3_error)
values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20417, __pyx_L3_error)
values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 20417, __pyx_L3_error)
values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 20417, __pyx_L3_error)
}
__pyx_v_desc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *)values[0]);
__pyx_v_extent = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *)values[1]);
__pyx_v_numLevels = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_numLevels == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20418, __pyx_L3_error)
__pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[3]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20418, __pyx_L3_error)
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaMallocMipmappedArray", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 20417, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
/* drop any argument references collected before the failure */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocMipmappedArray", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_desc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc, 1, "desc", 0))) __PYX_ERR(0, 20418, __pyx_L1_error)
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_extent), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExtent, 0, "extent", 0))) __PYX_ERR(0, 20418, __pyx_L1_error)
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_210cudaMallocMipmappedArray(__pyx_self, __pyx_v_desc, __pyx_v_extent, __pyx_v_numLevels, __pyx_v_flags);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
goto __pyx_L7_cleaned_up;
__pyx_L0:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__pyx_L7_cleaned_up:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation: allocates a mipmapped array via the cyruntime backend
   (GIL released for the driver call) and returns the (cudaError_t,
   cudaMipmappedArray_t-or-None) result tuple. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_210cudaMallocMipmappedArray(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_desc, struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent, unsigned int __pyx_v_numLevels, unsigned int __pyx_v_flags) {
struct __pyx_obj_4cuda_8bindings_7runtime_cudaMipmappedArray_t *__pyx_v_mipmappedArray = 0;
struct cudaChannelFormatDesc *__pyx_v_cydesc_ptr;
cudaError_t __pyx_v_err;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
size_t __pyx_t_4;
struct cudaChannelFormatDesc *__pyx_t_5;
int __pyx_t_6;
cudaError_t __pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cudaMallocMipmappedArray", 0);
/* "cuda/bindings/runtime.pyx":20532 * :py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuMipmappedArrayCreate` * """ * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: */
__pyx_t_2 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t);
__pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t);
__pyx_t_4 = 1;
{
PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL};
__pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20532, __pyx_L1_error)
__Pyx_GOTREF((PyObject *)__pyx_t_1);
}
__pyx_v_mipmappedArray = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMipmappedArray_t *)__pyx_t_1);
__pyx_t_1 = 0;
/* "cuda/bindings/runtime.pyx":20533 * """ * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMallocMipmappedArray(mipmappedArray._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], numLevels, flags) */
__pyx_t_6 = (((PyObject *)__pyx_v_desc) != Py_None);
if (__pyx_t_6) {
__pyx_t_5 = __pyx_v_desc->_pvt_ptr;
} else {
__pyx_t_5 = NULL;
}
__pyx_v_cydesc_ptr = __pyx_t_5;
/* "cuda/bindings/runtime.pyx":20534 * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocMipmappedArray(mipmappedArray._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], numLevels, flags) * if err != cyruntime.cudaSuccess: */
{
PyThreadState *_save;
_save = NULL;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
/*try:*/ {
/* "cuda/bindings/runtime.pyx":20535 * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: * err = cyruntime.cudaMallocMipmappedArray(mipmappedArray._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], numLevels, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */
__pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMallocMipmappedArray(((cudaMipmappedArray_t *)__pyx_v_mipmappedArray->_pvt_ptr), __pyx_v_cydesc_ptr, (__pyx_v_extent->_pvt_ptr[0]), __pyx_v_numLevels, __pyx_v_flags); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20535, __pyx_L4_error)
__pyx_v_err = __pyx_t_7;
}
/* "cuda/bindings/runtime.pyx":20534 * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() * cdef cyruntime.cudaChannelFormatDesc* cydesc_ptr = desc._pvt_ptr if desc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocMipmappedArray(mipmappedArray._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], numLevels, flags) * if err != cyruntime.cudaSuccess: */
/*finally:*/ {
/*normal exit:*/{
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L5;
}
__pyx_L4_error: {
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L1_error;
}
__pyx_L5:;
}
}
/* "cuda/bindings/runtime.pyx":20536 * with nogil: * err = cyruntime.cudaMallocMipmappedArray(mipmappedArray._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], numLevels, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], mipmappedArray) */
__pyx_t_6 = (__pyx_v_err != cudaSuccess);
if (__pyx_t_6) {
/* "cuda/bindings/runtime.pyx":20537 * err = cyruntime.cudaMallocMipmappedArray(mipmappedArray._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], numLevels, flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], mipmappedArray) * */
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20537, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20537, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20537, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20537, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_2);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 20537, __pyx_L1_error);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 20537, __pyx_L1_error);
__pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":20536 * with nogil: * err = cyruntime.cudaMallocMipmappedArray(mipmappedArray._pvt_ptr, cydesc_ptr, extent._pvt_ptr[0], numLevels, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], mipmappedArray) */
}
/* "cuda/bindings/runtime.pyx":20538 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], mipmappedArray) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20538, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20538, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20538, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20538, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 20538, __pyx_L1_error);
__Pyx_INCREF((PyObject *)__pyx_v_mipmappedArray);
__Pyx_GIVEREF((PyObject *)__pyx_v_mipmappedArray);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_mipmappedArray)) != (0)) __PYX_ERR(0, 20538, __pyx_L1_error);
__pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":20417 * return (_dict_cudaError_t[err], array) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocMipmappedArray(desc : Optional[cudaChannelFormatDesc], extent not None : cudaExtent, unsigned int numLevels, unsigned int flags): * """ Allocate a mipmapped array on the device. */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocMipmappedArray", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_mipmappedArray);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":20540 * return (_dict_cudaError_t[err], mipmappedArray) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetMipmappedArrayLevel(mipmappedArray, unsigned int level): * """ Gets a mipmap level of a CUDA mipmapped array.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_213cudaGetMipmappedArrayLevel(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_212cudaGetMipmappedArrayLevel, "cudaGetMipmappedArrayLevel(mipmappedArray, unsigned int level)\n\nGets a mipmap level of a CUDA mipmapped array.\n\nReturns in `*levelArray` a CUDA array that represents a single mipmap\nlevel of the CUDA mipmapped array `mipmappedArray`.\n\nIf `level` is greater than the maximum number of levels in this\nmipmapped array, :py:obj:`~.cudaErrorInvalidValue` is returned.\n\nIf `mipmappedArray` is NULL, :py:obj:`~.cudaErrorInvalidResourceHandle`\nis returned.\n\nParameters\n----------\nmipmappedArray : :py:obj:`~.cudaMipmappedArray_const_t`\n CUDA mipmapped array\nlevel : unsigned int\n Mipmap level\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue` :py:obj:`~.cudaErrorInvalidResourceHandle`\nlevelArray : :py:obj:`~.cudaArray_t`\n Returned mipmap level CUDA array\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc`, :py:obj:`~.cudaMallocPitch`, :py:obj:`~.cudaFree`, :py:obj:`~.cudaFreeArray`, :py:obj:`~.cudaMallocHost (C API)`, :py:obj:`~.cudaFreeHost`, :py:obj:`~.cudaHostAlloc`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.cuMipmappedArrayGetLevel`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_213cudaGetMipmappedArrayLevel = {"cudaGetMipmappedArrayLevel", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_213cudaGetMipmappedArrayLevel, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_212cudaGetMipmappedArrayLevel}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_213cudaGetMipmappedArrayLevel(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const 
*__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_mipmappedArray = 0; unsigned int __pyx_v_level; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetMipmappedArrayLevel (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_mipmappedArray,&__pyx_mstate_global->__pyx_n_u_level,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20540, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20540, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20540, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGetMipmappedArrayLevel", 0) < (0)) __PYX_ERR(0, 20540, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGetMipmappedArrayLevel", 1, 2, 2, i); __PYX_ERR(0, 20540, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { 
goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20540, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20540, __pyx_L3_error) } __pyx_v_mipmappedArray = values[0]; __pyx_v_level = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_level == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20541, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGetMipmappedArrayLevel", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20540, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetMipmappedArrayLevel", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_212cudaGetMipmappedArrayLevel(__pyx_self, __pyx_v_mipmappedArray, __pyx_v_level); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_212cudaGetMipmappedArrayLevel(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mipmappedArray, unsigned int __pyx_v_level) { cudaMipmappedArray_const_t __pyx_v_cymipmappedArray; PyObject *__pyx_v_pmipmappedArray = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *__pyx_v_levelArray = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; 
__pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetMipmappedArrayLevel", 0); /* "cuda/bindings/runtime.pyx":20572 * """ * cdef cyruntime.cudaMipmappedArray_const_t cymipmappedArray * if mipmappedArray is None: # <<<<<<<<<<<<<< * pmipmappedArray = 0 * elif isinstance(mipmappedArray, (cudaMipmappedArray_const_t,)): */ __pyx_t_1 = (__pyx_v_mipmappedArray == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20573 * cdef cyruntime.cudaMipmappedArray_const_t cymipmappedArray * if mipmappedArray is None: * pmipmappedArray = 0 # <<<<<<<<<<<<<< * elif isinstance(mipmappedArray, (cudaMipmappedArray_const_t,)): * pmipmappedArray = int(mipmappedArray) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmipmappedArray = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":20572 * """ * cdef cyruntime.cudaMipmappedArray_const_t cymipmappedArray * if mipmappedArray is None: # <<<<<<<<<<<<<< * pmipmappedArray = 0 * elif isinstance(mipmappedArray, (cudaMipmappedArray_const_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20574 * if mipmappedArray is None: * pmipmappedArray = 0 * elif isinstance(mipmappedArray, (cudaMipmappedArray_const_t,)): # <<<<<<<<<<<<<< * pmipmappedArray = int(mipmappedArray) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_mipmappedArray, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_const_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20575 * pmipmappedArray = 0 * elif isinstance(mipmappedArray, (cudaMipmappedArray_const_t,)): * pmipmappedArray = int(mipmappedArray) # <<<<<<<<<<<<<< * else: * pmipmappedArray = int(cudaMipmappedArray_const_t(mipmappedArray)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_mipmappedArray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pmipmappedArray = ((PyObject*)__pyx_t_2); 
__pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":20574 * if mipmappedArray is None: * pmipmappedArray = 0 * elif isinstance(mipmappedArray, (cudaMipmappedArray_const_t,)): # <<<<<<<<<<<<<< * pmipmappedArray = int(mipmappedArray) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20577 * pmipmappedArray = int(mipmappedArray) * else: * pmipmappedArray = int(cudaMipmappedArray_const_t(mipmappedArray)) # <<<<<<<<<<<<<< * cymipmappedArray = pmipmappedArray * cdef cudaArray_t levelArray = cudaArray_t() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_const_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_const_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_mipmappedArray}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20577, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pmipmappedArray = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":20578 * else: * pmipmappedArray = int(cudaMipmappedArray_const_t(mipmappedArray)) * cymipmappedArray = pmipmappedArray # <<<<<<<<<<<<<< * cdef cudaArray_t levelArray = cudaArray_t() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmipmappedArray); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 20578, __pyx_L1_error) __pyx_v_cymipmappedArray = ((cudaMipmappedArray_const_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* 
"cuda/bindings/runtime.pyx":20579 * pmipmappedArray = int(cudaMipmappedArray_const_t(mipmappedArray)) * cymipmappedArray = pmipmappedArray * cdef cudaArray_t levelArray = cudaArray_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetMipmappedArrayLevel(levelArray._pvt_ptr, cymipmappedArray, level) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20579, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_levelArray = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":20580 * cymipmappedArray = pmipmappedArray * cdef cudaArray_t levelArray = cudaArray_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetMipmappedArrayLevel(levelArray._pvt_ptr, cymipmappedArray, level) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20581 * cdef cudaArray_t levelArray = cudaArray_t() * with nogil: * err = cyruntime.cudaGetMipmappedArrayLevel(levelArray._pvt_ptr, cymipmappedArray, level) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetMipmappedArrayLevel(((cudaArray_t *)__pyx_v_levelArray->_pvt_ptr), __pyx_v_cymipmappedArray, __pyx_v_level); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20581, __pyx_L5_error) __pyx_v_err = 
__pyx_t_7; } /* "cuda/bindings/runtime.pyx":20580 * cymipmappedArray = pmipmappedArray * cdef cudaArray_t levelArray = cudaArray_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetMipmappedArrayLevel(levelArray._pvt_ptr, cymipmappedArray, level) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":20582 * with nogil: * err = cyruntime.cudaGetMipmappedArrayLevel(levelArray._pvt_ptr, cymipmappedArray, level) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], levelArray) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20583 * err = cyruntime.cudaGetMipmappedArrayLevel(levelArray._pvt_ptr, cymipmappedArray, level) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], levelArray) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 20583, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) 
!= (0)) __PYX_ERR(0, 20583, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20582 * with nogil: * err = cyruntime.cudaGetMipmappedArrayLevel(levelArray._pvt_ptr, cymipmappedArray, level) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], levelArray) */ } /* "cuda/bindings/runtime.pyx":20584 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], levelArray) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20584, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20584, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20584, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20584, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 20584, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_levelArray); __Pyx_GIVEREF((PyObject *)__pyx_v_levelArray); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_levelArray)) != (0)) __PYX_ERR(0, 20584, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20540 * return (_dict_cudaError_t[err], mipmappedArray) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetMipmappedArrayLevel(mipmappedArray, unsigned int level): * """ Gets a mipmap level of a CUDA mipmapped array. 
*/
/* function exit code */
/* Shared exit path of cudaGetMipmappedArrayLevel (its body begins before this
 * chunk): the error label releases temporaries and records a traceback, then
 * both paths drop the locals and close the RefNanny context. */
__pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetMipmappedArrayLevel", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_pmipmappedArray);
  __Pyx_XDECREF((PyObject *)__pyx_v_levelArray);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":20586 -- def cudaMemcpy3D(p : Optional[cudaMemcpy3DParms]) */

/* Python wrapper: CPython entry point for cudaMemcpy3D. Parses the single
 * positional-or-keyword argument "p" under either the METH_FASTCALL ABI
 * (args vector + count) or the legacy tuple ABI, then dispatches to the
 * implementation function __pyx_pf_..._214cudaMemcpy3D below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_215cudaMemcpy3D(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_214cudaMemcpy3D, "cudaMemcpy3D(cudaMemcpy3DParms p: Optional[cudaMemcpy3DParms])\n\nCopies data between 3D objects.\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\n:py:obj:`~.cudaMemcpy3D()` copies data betwen two 3D objects. The\nsource and destination objects may be in either host memory, device\nmemory, or a CUDA array. The source, destination, extent, and kind of\ncopy performed is specified by the :py:obj:`~.cudaMemcpy3DParms` struct\nwhich should be initialized to zero before use:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nThe struct passed to :py:obj:`~.cudaMemcpy3D()` must specify one of\n`srcArray` or `srcPtr` and one of `dstArray` or `dstPtr`. Passing more\nthan one non-zero source or destination will cause\n:py:obj:`~.cudaMemcpy3D()` to return an error.\n\nThe `srcPos` and `dstPos` fields are optional offsets into the source\nand destination objects and are defined in units of each object's\nelements. The element for a host or device pointer is assumed to be\nunsigned char.\n\nThe `extent` field defines the dimensions of the transferred area in\nelements. If a CUDA array is participating in the copy, the extent is\ndefined in terms of that array's elements. If no CUDA array is\nparticipating in the copy then the extents are defined in elements of\nunsigned char.\n\nThe `kind` field defines the direction of the copy. It must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. For :py:obj:`~.cudaMemcpyHostToHost` or\n:py:obj:`~.cudaMemcpyHostToDevice` or\n:py:obj:`~.cudaMemcpyDeviceToHost` passed as kind and cudaArray type\npassed as source or destination, if the kind"" implies cudaArray type to\nbe present on the host, :py:obj:`~.cudaMemcpy3D()` will disregard that\nimplication and silently correct the kind based on the fact that\ncudaArray type can only be present on the device.\n\nIf the source and destination are both arrays,\n:py:obj:`~.cudaMemcpy3D()` will return an error if they do not have the\nsame element size.\n\nThe source and destination object may not overlap. If overlapping\nsource and destination objects are specified, undefined behavior will\nresult.\n\nThe source object must entirely contain the region defined by `srcPos`\nand `extent`. The destination object must entirely contain the region\ndefined by `dstPos` and `extent`.\n\n:py:obj:`~.cudaMemcpy3D()` returns an error if the pitch of `srcPtr` or\n`dstPtr` exceeds the maximum allowed. The pitch of a\n:py:obj:`~.cudaPitchedPtr` allocated with :py:obj:`~.cudaMalloc3D()`\nwill always be valid.\n\nParameters\n----------\np : :py:obj:`~.cudaMemcpy3DParms`\n 3D memory copy parameters\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemcpy3DAsync`, :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.make_cudaPos`, :py:obj:`~.cuMemcpy3D`");
/* Method-table entry registering the wrapper under the name "cudaMemcpy3D". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_215cudaMemcpy3D = {"cudaMemcpy3D", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_215cudaMemcpy3D, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_214cudaMemcpy3D};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_215cudaMemcpy3D(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *__pyx_v_p = 0;
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references to the parsed arguments; released on every exit path. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("cudaMemcpy3D (wrapper)", 0);
  /* Legacy tuple ABI: derive the positional-argument count from the tuple. */
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_p,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20586, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20586, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy3D", 0) < (0)) __PYX_ERR(0, 20586, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy3D", 1, 1, 1, i); __PYX_ERR(0, 20586, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20586, __pyx_L3_error)
    }
    __pyx_v_p = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *)values[0]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("cudaMemcpy3D", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20586, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3D", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* NOTE(review): third argument 1 to __Pyx_ArgTypeTest appears to permit None
   * for "p" (the .pyx signature is Optional[cudaMemcpy3DParms]) -- confirm
   * against the Cython utility-code definition. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_p), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpy3DParms, 1, "p", 0))) __PYX_ERR(0, 20587, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_214cudaMemcpy3D(__pyx_self, __pyx_v_p);
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation: unwraps p._pvt_ptr (NULL when p is None), releases the GIL
 * around the cyruntime.cudaMemcpy3D call, and returns a 1-tuple
 * (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_214cudaMemcpy3D(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *__pyx_v_p) {
  struct cudaMemcpy3DParms *__pyx_v_cyp_ptr;
  cudaError_t __pyx_v_err;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  struct cudaMemcpy3DParms *__pyx_t_1;
  int __pyx_t_2;
  cudaError_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("cudaMemcpy3D", 0);
  /* "cuda/bindings/runtime.pyx":20662 -- cyp_ptr = p._pvt_ptr if p is not None else NULL */
  __pyx_t_2 = (((PyObject *)__pyx_v_p) != Py_None);
  if (__pyx_t_2) {
    __pyx_t_1 = __pyx_v_p->_pvt_ptr;
  } else {
    __pyx_t_1 = NULL;
  }
  __pyx_v_cyp_ptr = __pyx_t_1;
  /* "cuda/bindings/runtime.pyx":20663 -- with nogil: release the GIL for the runtime call */
  {
    PyThreadState *_save;
    _save = NULL;
    Py_UNBLOCK_THREADS
    __Pyx_FastGIL_Remember();
    /*try:*/ {
      /* "cuda/bindings/runtime.pyx":20664 -- err = cyruntime.cudaMemcpy3D(cyp_ptr) */
      __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy3D(__pyx_v_cyp_ptr);
      /* NOTE(review): cudaErrorCallRequiresNewerDriver seems to double as the
       * "Python exception pending" sentinel of the cyruntime shim -- confirm
       * against the cyruntime wrapper source. */
      if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20664, __pyx_L4_error)
      __pyx_v_err = __pyx_t_3;
    }
    /*finally:*/ {
      /*normal exit:*/ {
        __Pyx_FastGIL_Forget();
        Py_BLOCK_THREADS
        goto __pyx_L5;
      }
      __pyx_L4_error: {
        __Pyx_FastGIL_Forget();
        Py_BLOCK_THREADS
        goto __pyx_L1_error;
      }
      __pyx_L5:;
    }
  }
  /* "cuda/bindings/runtime.pyx":20665 -- return (_dict_cudaError_t[err],) */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t);
  if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20665, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err);
  if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20665, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5);
  if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20665, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyTuple_New(1);
  if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20665, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 20665, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;
  /* "cuda/bindings/runtime.pyx":20586 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3D", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":20667
 * return (_dict_cudaError_t[err],)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaMemcpy3DPeer(p : Optional[cudaMemcpy3DPeerParms]):
 * """ Copies memory between devices.
*/
/* Python wrapper: CPython entry point for cudaMemcpy3DPeer. Same parsing
 * lattice as cudaMemcpy3D above: one positional-or-keyword argument "p"
 * (None allowed), then dispatch to __pyx_pf_..._216cudaMemcpy3DPeer. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_217cudaMemcpy3DPeer(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_216cudaMemcpy3DPeer, "cudaMemcpy3DPeer(cudaMemcpy3DPeerParms p: Optional[cudaMemcpy3DPeerParms])\n\nCopies memory between devices.\n\nPerform a 3D memory copy according to the parameters specified in `p`.\nSee the definition of the :py:obj:`~.cudaMemcpy3DPeerParms` structure\nfor documentation of its parameters.\n\nNote that this function is synchronous with respect to the host only if\nthe source or destination of the transfer is host memory. Note also\nthat this copy is serialized with respect to all pending and future\nasynchronous work in to the current device, the copy's source device,\nand the copy's destination device (use\n:py:obj:`~.cudaMemcpy3DPeerAsync` to avoid this synchronization).\n\nParameters\n----------\np : :py:obj:`~.cudaMemcpy3DPeerParms`\n Parameters for the memory copy\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidPitchValue`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyPeerAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cuMemcpy3DPeer`");
/* Method-table entry registering the wrapper under the name "cudaMemcpy3DPeer". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_217cudaMemcpy3DPeer = {"cudaMemcpy3DPeer", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_217cudaMemcpy3DPeer, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_216cudaMemcpy3DPeer};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_217cudaMemcpy3DPeer(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DPeerParms *__pyx_v_p = 0;
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  /* values[] holds owned references to the parsed arguments; released on every exit path. */
  PyObject* values[1] = {0};
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("cudaMemcpy3DPeer (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_p,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20667, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword call: collect positionals, then merge keywords. */
      switch (__pyx_nargs) {
        case  1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
        if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20667, __pyx_L3_error)
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy3DPeer", 0) < (0)) __PYX_ERR(0, 20667, __pyx_L3_error)
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy3DPeer", 1, 1, 1, i); __PYX_ERR(0, 20667, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20667, __pyx_L3_error)
    }
    __pyx_v_p = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DPeerParms *)values[0]);
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("cudaMemcpy3DPeer", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20667, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3DPeer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  /* NOTE(review): third argument 1 to __Pyx_ArgTypeTest appears to permit None
   * for "p" (Optional in the .pyx signature) -- confirm against the Cython
   * utility-code definition. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_p), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpy3DPeerParms, 1, "p", 0))) __PYX_ERR(0, 20668, __pyx_L1_error)
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_216cudaMemcpy3DPeer(__pyx_self, __pyx_v_p);
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = NULL;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  goto __pyx_L7_cleaned_up;
  __pyx_L0:;
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __pyx_L7_cleaned_up:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation: unwraps p._pvt_ptr (NULL when p is None), releases the GIL
 * around the cyruntime.cudaMemcpy3DPeer call, and returns a 1-tuple
 * (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_216cudaMemcpy3DPeer(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DPeerParms *__pyx_v_p) {
  struct cudaMemcpy3DPeerParms *__pyx_v_cyp_ptr;
  cudaError_t __pyx_v_err;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  struct cudaMemcpy3DPeerParms *__pyx_t_1;
  int __pyx_t_2;
  cudaError_t __pyx_t_3;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("cudaMemcpy3DPeer", 0);
  /* "cuda/bindings/runtime.pyx":20696 -- cyp_ptr = p._pvt_ptr if p is not None else NULL */
  __pyx_t_2 = (((PyObject *)__pyx_v_p) != Py_None);
  if (__pyx_t_2) {
    __pyx_t_1 = __pyx_v_p->_pvt_ptr;
  } else {
    __pyx_t_1 = NULL;
  }
  __pyx_v_cyp_ptr = __pyx_t_1;
  /* "cuda/bindings/runtime.pyx":20697 -- with nogil: release the GIL for the runtime call */
  {
    PyThreadState *_save;
    _save = NULL;
    Py_UNBLOCK_THREADS
    __Pyx_FastGIL_Remember();
    /*try:*/ {
      /* "cuda/bindings/runtime.pyx":20698 -- err = cyruntime.cudaMemcpy3DPeer(cyp_ptr) */
      __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy3DPeer(__pyx_v_cyp_ptr);
      /* NOTE(review): cudaErrorCallRequiresNewerDriver seems to double as the
       * "Python exception pending" sentinel of the cyruntime shim -- confirm. */
      if (unlikely(__pyx_t_3 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20698, __pyx_L4_error)
      __pyx_v_err = __pyx_t_3;
    }
    /*finally:*/ {
      /*normal exit:*/ {
        __Pyx_FastGIL_Forget();
        Py_BLOCK_THREADS
        goto __pyx_L5;
      }
      __pyx_L4_error: {
        __Pyx_FastGIL_Forget();
        Py_BLOCK_THREADS
        goto __pyx_L1_error;
      }
      __pyx_L5:;
    }
  }
  /* "cuda/bindings/runtime.pyx":20699 -- return (_dict_cudaError_t[err],) */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t);
  if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err);
  if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5);
  if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  __pyx_t_5 = PyTuple_New(1);
  if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20699, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_6);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_6) != (0)) __PYX_ERR(0, 20699, __pyx_L1_error);
  __pyx_t_6 = 0;
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;
  /* "cuda/bindings/runtime.pyx":20667 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3DPeer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "cuda/bindings/runtime.pyx":20701
 * return (_dict_cudaError_t[err],)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaMemcpy3DAsync(p : Optional[cudaMemcpy3DParms], stream):
 * """ Copies data between 3D objects.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_219cudaMemcpy3DAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_218cudaMemcpy3DAsync, "cudaMemcpy3DAsync(cudaMemcpy3DParms p: Optional[cudaMemcpy3DParms], stream)\n\nCopies data between 3D objects.\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\n:py:obj:`~.cudaMemcpy3DAsync()` copies data betwen two 3D objects. The\nsource and destination objects may be in either host memory, device\nmemory, or a CUDA array. The source, destination, extent, and kind of\ncopy performed is specified by the :py:obj:`~.cudaMemcpy3DParms` struct\nwhich should be initialized to zero before use:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nThe struct passed to :py:obj:`~.cudaMemcpy3DAsync()` must specify one\nof `srcArray` or `srcPtr` and one of `dstArray` or `dstPtr`. Passing\nmore than one non-zero source or destination will cause\n:py:obj:`~.cudaMemcpy3DAsync()` to return an error.\n\nThe `srcPos` and `dstPos` fields are optional offsets into the source\nand destination objects and are defined in units of each object's\nelements. The element for a host or device pointer is assumed to be\nunsigned char. For CUDA arrays, positions must be in the range [0,\n2048) for any dimension.\n\nThe `extent` field defines the dimensions of the transferred area in\nelements. If a CUDA array is participating in the copy, the extent is\ndefined in terms of that array's elements. If no CUDA array is\nparticipating in the copy then the extents are defined in elements of\nunsigned char.\n\nThe `kind` field defines the direction of the copy. 
It must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. For :py:obj:`~.cudaMemcpyHostToHost` or\n:py:obj:`~.cudaMemcpyHostToDevice` or\n:py:obj:""`~.cudaMemcpyDeviceToHost` passed as kind and cudaArray type\npassed as source or destination, if the kind implies cudaArray type to\nbe present on the host, :py:obj:`~.cudaMemcpy3DAsync()` will disregard\nthat implication and silently correct the kind based on the fact that\ncudaArray type can only be present on the device.\n\nIf the source and destination are both arrays,\n:py:obj:`~.cudaMemcpy3DAsync()` will return an error if they do not\nhave the same element size.\n\nThe source and destination object may not overlap. If overlapping\nsource and destination objects are specified, undefined behavior will\nresult.\n\nThe source object must lie entirely within the region defined by\n`srcPos` and `extent`. The destination object must lie entirely within\nthe region defined by `dstPos` and `extent`.\n\n:py:obj:`~.cudaMemcpy3DAsync()` returns an error if the pitch of\n`srcPtr` or `dstPtr` exceeds the maximum allowed. The pitch of a\n:py:obj:`~.cudaPitchedPtr` allocated with :py:obj:`~.cudaMalloc3D()`\nwill always be valid.\n\n:py:obj:`~.cudaMemcpy3DAsync()` is asynchronous with respect to the\nhost, so the call may return before the copy is complete. The copy can\noptionally be associated to a stream by passing a non-zero `stream`\nargument. 
If `kind` is :py:obj:`~.cudaMemcpyHostToDevice` or\n:py:obj:`~.cudaMemcpyDeviceToHost` and `stream` is non-zero, the copy\nmay overlap with operations in other streams.\n\nThe device version of this function only handles device to device\ncopies and cannot be given local or shared pointers.\n\nParameters\n----------\np : :py:obj:`~.cudaMemcpy3DParms`\n 3D memory copy parameters\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMalloc3D`, :py:obj:`~.cudaMalloc3DArray`, :py:obj:`~.c""udaMemset3D`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, ::::py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.make_cudaExtent`, :py:obj:`~.make_cudaPos`, :py:obj:`~.cuMemcpy3DAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_219cudaMemcpy3DAsync = {"cudaMemcpy3DAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_219cudaMemcpy3DAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_218cudaMemcpy3DAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_219cudaMemcpy3DAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *__pyx_v_p = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL 
CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy3DAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_p,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20701, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20701, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20701, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy3DAsync", 0) < (0)) __PYX_ERR(0, 20701, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy3DAsync", 1, 2, 2, i); __PYX_ERR(0, 20701, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20701, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if 
(!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20701, __pyx_L3_error) } __pyx_v_p = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *)values[0]); __pyx_v_stream = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpy3DAsync", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20701, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3DAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_p), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpy3DParms, 1, "p", 0))) __PYX_ERR(0, 20702, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_218cudaMemcpy3DAsync(__pyx_self, __pyx_v_p, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Cython-generated implementation of runtime.pyx cudaMemcpy3DAsync(p, stream). Normalizes `stream` to an integer handle (None -> 0; cudaStream_t/driver.CUstream -> int(stream); otherwise int(cudaStream_t(stream))), takes p._pvt_ptr as the C parameter struct (NULL when p is None), invokes cyruntime.cudaMemcpy3DAsync with the GIL released, and returns the 1-tuple (_dict_cudaError_t[err],). NOTE(review): generated code - do not hand-edit; change runtime.pyx and regenerate. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_218cudaMemcpy3DAsync(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *__pyx_v_p, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct cudaMemcpy3DParms *__pyx_v_cyp_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; struct cudaMemcpy3DParms *__pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy3DAsync", 0); /* "cuda/bindings/runtime.pyx":20791 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20792 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":20791 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20793 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20794 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = 
((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":20793 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20796 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaMemcpy3DParms* cyp_ptr = p._pvt_ptr if p is not None else NULL */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20796, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":20797 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpy3DParms* cyp_ptr = p._pvt_ptr if p is not None else NULL * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 20797, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":20798 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cdef cyruntime.cudaMemcpy3DParms* cyp_ptr = 
p._pvt_ptr if p is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy3DAsync(cyp_ptr, cystream) */ __pyx_t_1 = (((PyObject *)__pyx_v_p) != Py_None); if (__pyx_t_1) { __pyx_t_8 = __pyx_v_p->_pvt_ptr; } else { __pyx_t_8 = NULL; } __pyx_v_cyp_ptr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":20799 * cystream = pstream * cdef cyruntime.cudaMemcpy3DParms* cyp_ptr = p._pvt_ptr if p is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy3DAsync(cyp_ptr, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20800 * cdef cyruntime.cudaMemcpy3DParms* cyp_ptr = p._pvt_ptr if p is not None else NULL * with nogil: * err = cyruntime.cudaMemcpy3DAsync(cyp_ptr, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy3DAsync(__pyx_v_cyp_ptr, __pyx_v_cystream); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20800, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":20799 * cystream = pstream * cdef cyruntime.cudaMemcpy3DParms* cyp_ptr = p._pvt_ptr if p is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy3DAsync(cyp_ptr, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":20801 * with nogil: * err = cyruntime.cudaMemcpy3DAsync(cyp_ptr, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20801, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20801, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 20801, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20701 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy3DAsync(p : Optional[cudaMemcpy3DParms], stream): * """ Copies data between 3D objects. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3DAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20803 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy3DPeerAsync(p : Optional[cudaMemcpy3DPeerParms], stream): * """ Copies memory between devices asynchronously. 
*/ /* Python wrapper for cudaMemcpy3DPeerAsync(p, stream): parses the two arguments (positional or keyword), allows p to be None but otherwise type-checks it against cudaMemcpy3DPeerParms, then delegates to the __pyx_pf_ implementation below. NOTE(review): Cython-generated - edit runtime.pyx, not this file. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_221cudaMemcpy3DPeerAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_220cudaMemcpy3DPeerAsync, "cudaMemcpy3DPeerAsync(cudaMemcpy3DPeerParms p: Optional[cudaMemcpy3DPeerParms], stream)\n\nCopies memory between devices asynchronously.\n\nPerform a 3D memory copy according to the parameters specified in `p`.\nSee the definition of the :py:obj:`~.cudaMemcpy3DPeerParms` structure\nfor documentation of its parameters.\n\nParameters\n----------\np : :py:obj:`~.cudaMemcpy3DPeerParms`\n Parameters for the memory copy\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidPitchValue`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyPeerAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cuMemcpy3DPeerAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_221cudaMemcpy3DPeerAsync = {"cudaMemcpy3DPeerAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_221cudaMemcpy3DPeerAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_220cudaMemcpy3DPeerAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_221cudaMemcpy3DPeerAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DPeerParms *__pyx_v_p = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t 
__pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy3DPeerAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_p,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20803, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20803, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20803, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy3DPeerAsync", 0) < (0)) __PYX_ERR(0, 20803, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy3DPeerAsync", 1, 2, 2, i); __PYX_ERR(0, 20803, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20803, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && 
unlikely(!values[1])) __PYX_ERR(0, 20803, __pyx_L3_error) } __pyx_v_p = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DPeerParms *)values[0]); __pyx_v_stream = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpy3DPeerAsync", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20803, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3DPeerAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_p), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpy3DPeerParms, 1, "p", 0))) __PYX_ERR(0, 20804, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_220cudaMemcpy3DPeerAsync(__pyx_self, __pyx_v_p, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation of cudaMemcpy3DPeerAsync(p, stream): normalizes `stream` (None -> 0; cudaStream_t/driver.CUstream -> int(stream); otherwise int(cudaStream_t(stream))) into a cudaStream_t handle, takes p._pvt_ptr (NULL when p is None), calls cyruntime.cudaMemcpy3DPeerAsync with the GIL released, and returns (_dict_cudaError_t[err],). Generated code. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_220cudaMemcpy3DPeerAsync(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DPeerParms *__pyx_v_p, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct cudaMemcpy3DPeerParms *__pyx_v_cyp_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; 
PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; struct cudaMemcpy3DPeerParms *__pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy3DPeerAsync", 0); /* "cuda/bindings/runtime.pyx":20828 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20829 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":20828 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20830 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20831 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = 
((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":20830 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20833 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaMemcpy3DPeerParms* cyp_ptr = p._pvt_ptr if p is not None else NULL */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20833, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":20834 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpy3DPeerParms* cyp_ptr = p._pvt_ptr if p is not None else NULL * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 20834, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":20835 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cdef cyruntime.cudaMemcpy3DPeerParms* 
cyp_ptr = p._pvt_ptr if p is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy3DPeerAsync(cyp_ptr, cystream) */ __pyx_t_1 = (((PyObject *)__pyx_v_p) != Py_None); if (__pyx_t_1) { __pyx_t_8 = __pyx_v_p->_pvt_ptr; } else { __pyx_t_8 = NULL; } __pyx_v_cyp_ptr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":20836 * cystream = pstream * cdef cyruntime.cudaMemcpy3DPeerParms* cyp_ptr = p._pvt_ptr if p is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy3DPeerAsync(cyp_ptr, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20837 * cdef cyruntime.cudaMemcpy3DPeerParms* cyp_ptr = p._pvt_ptr if p is not None else NULL * with nogil: * err = cyruntime.cudaMemcpy3DPeerAsync(cyp_ptr, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy3DPeerAsync(__pyx_v_cyp_ptr, __pyx_v_cystream); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20837, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":20836 * cystream = pstream * cdef cyruntime.cudaMemcpy3DPeerParms* cyp_ptr = p._pvt_ptr if p is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy3DPeerAsync(cyp_ptr, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":20838 * with nogil: * err = cyruntime.cudaMemcpy3DPeerAsync(cyp_ptr, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) 
__PYX_ERR(0, 20838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 20838, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20803 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy3DPeerAsync(p : Optional[cudaMemcpy3DPeerParms], stream): * """ Copies memory between devices asynchronously. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3DPeerAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20840 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemGetInfo(): * """ Gets free and total device memory. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_223cudaMemGetInfo(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_222cudaMemGetInfo, "cudaMemGetInfo()\n\nGets free and total device memory.\n\nReturns in `*total` the total amount of memory available to the the\ncurrent context. Returns in `*free` the amount of memory on the device\nthat is free according to the OS. 
CUDA is not guaranteed to be able to\nallocate all of the memory that the OS reports as free. In a multi-\ntenet situation, free estimate returned is prone to race condition\nwhere a new allocation/free done by a different process or a different\nthread in the same process between the time when free memory was\nestimated and reported, will result in deviation in free value reported\nand actual free memory.\n\nThe integrated GPU on Tegra shares memory with CPU and other component\nof the SoC. The free and total values returned by the API excludes the\nSWAP memory space maintained by the OS on some platforms. The OS may\nmove some of the memory pages into swap area as the GPU or CPU allocate\nor access memory. See Tegra app note on how to calculate total and free\nmemory on Tegra.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorLaunchFailure`\nfree : int\n Returned free memory in bytes\ntotal : int\n Returned total memory in bytes\n\nSee Also\n--------\n:py:obj:`~.cuMemGetInfo`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_223cudaMemGetInfo = {"cudaMemGetInfo", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_223cudaMemGetInfo, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_222cudaMemGetInfo}; /* Python wrapper for cudaMemGetInfo() (METH_NOARGS): delegates straight to the __pyx_pf_ implementation. NOTE(review): generated code - __Pyx_KwValues_VARARGS discards its arguments here, so the __pyx_args/__pyx_nargs references compile away; edit runtime.pyx, not this file. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_223cudaMemGetInfo(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemGetInfo (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_222cudaMemGetInfo(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation of cudaMemGetInfo(): calls cyruntime.cudaMemGetInfo(&free, &total) with the GIL released; on failure returns (_dict_cudaError_t[err], None, None), on success (_dict_cudaError_t[err], free, total) with byte counts as Python ints. Generated code. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_222cudaMemGetInfo(CYTHON_UNUSED PyObject *__pyx_self) { size_t __pyx_v_free; size_t __pyx_v_total; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemGetInfo", 0); /* "cuda/bindings/runtime.pyx":20874 * :py:obj:`~.cuMemGetInfo` * """ * cdef size_t free = 0 # <<<<<<<<<<<<<< * cdef size_t total = 0 * with nogil: */ __pyx_v_free = 0; /* "cuda/bindings/runtime.pyx":20875 * """ * cdef size_t free = 0 * cdef size_t total = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemGetInfo(&free, &total) */ __pyx_v_total = 0; /* "cuda/bindings/runtime.pyx":20876 * cdef size_t free = 0 * cdef size_t total = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemGetInfo(&free, &total) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20877 * cdef size_t total = 0 * with nogil: * err = cyruntime.cudaMemGetInfo(&free, &total) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemGetInfo((&__pyx_v_free), (&__pyx_v_total)); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20877, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":20876 * cdef size_t free = 0 * cdef size_t total = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemGetInfo(&free, &total) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":20878 * with nogil: * err = cyruntime.cudaMemGetInfo(&free, &total) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * 
return (_dict_cudaError_t[err], None, None) * return (_dict_cudaError_t[err], free, total) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":20879 * err = cyruntime.cudaMemGetInfo(&free, &total) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], free, total) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20879, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20879, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20879, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20879, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 20879, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 20879, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, Py_None) != (0)) __PYX_ERR(0, 20879, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20878 * with nogil: * err = cyruntime.cudaMemGetInfo(&free, &total) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None) * return (_dict_cudaError_t[err], free, total) */ } /* "cuda/bindings/runtime.pyx":20880 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None) * return (_dict_cudaError_t[err], free, total) # <<<<<<<<<<<<<< * * 
@cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_FromSize_t(__pyx_v_free); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_FromSize_t(__pyx_v_total); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 20880, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 20880, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_4) != (0)) __PYX_ERR(0, 20880, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20840 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemGetInfo(): * """ Gets free and total device memory. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemGetInfo", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20882 * return (_dict_cudaError_t[err], free, total) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaArrayGetInfo(array): * """ Gets info about the specified cudaArray. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_225cudaArrayGetInfo(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_224cudaArrayGetInfo, "cudaArrayGetInfo(array)\n\nGets info about the specified cudaArray.\n\nReturns in `*desc`, `*extent` and `*flags` respectively, the type,\nshape and flags of `array`.\n\nAny of `*desc`, `*extent` and `*flags` may be specified as NULL.\n\nParameters\n----------\narray : :py:obj:`~.cudaArray_t`\n The :py:obj:`~.cudaArray` to get info for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\ndesc : :py:obj:`~.cudaChannelFormatDesc`\n Returned array type\nextent : :py:obj:`~.cudaExtent`\n Returned array shape. 
2D arrays will have depth of zero\nflags : unsigned int\n Returned array flags\n\nSee Also\n--------\n:py:obj:`~.cuArrayGetDescriptor`, :py:obj:`~.cuArray3DGetDescriptor`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_225cudaArrayGetInfo = {"cudaArrayGetInfo", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_225cudaArrayGetInfo, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_224cudaArrayGetInfo}; /* Python wrapper for cudaArrayGetInfo(array): parses the single `array` argument (positional or keyword) and delegates to the __pyx_pf_ implementation; no type test is applied to `array` here. NOTE(review): Cython-generated - edit runtime.pyx, not this file. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_225cudaArrayGetInfo(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_array = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaArrayGetInfo (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_array_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20882, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20882, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaArrayGetInfo", 0) < (0)) __PYX_ERR(0, 20882, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaArrayGetInfo", 1, 1, 1, i); __PYX_ERR(0, 20882, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20882, __pyx_L3_error) } __pyx_v_array = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaArrayGetInfo", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 20882, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaArrayGetInfo", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_224cudaArrayGetInfo(__pyx_self, __pyx_v_array); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation of cudaArrayGetInfo(array) - definition continues past this chunk. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_224cudaArrayGetInfo(CYTHON_UNUSED PyObject 
*__pyx_self, PyObject *__pyx_v_array) { cudaArray_t __pyx_v_cyarray; PyObject *__pyx_v_parray = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_desc = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent = 0; unsigned int __pyx_v_flags; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaArrayGetInfo", 0); /* "cuda/bindings/runtime.pyx":20912 * """ * cdef cyruntime.cudaArray_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif isinstance(array, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_array == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20913 * cdef cyruntime.cudaArray_t cyarray * if array is None: * parray = 0 # <<<<<<<<<<<<<< * elif isinstance(array, (cudaArray_t,)): * parray = int(array) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_parray = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":20912 * """ * cdef cyruntime.cudaArray_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif isinstance(array, (cudaArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20914 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_array, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20915 * parray = 0 * elif isinstance(array, (cudaArray_t,)): * parray = int(array) # <<<<<<<<<<<<<< * else: * parray = int(cudaArray_t(array)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20915, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __pyx_v_parray = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":20914 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20917 * parray = int(array) * else: * parray = int(cudaArray_t(array)) # <<<<<<<<<<<<<< * cyarray = parray * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_array}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20917, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20917, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_parray = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":20918 * else: * parray = int(cudaArray_t(array)) * cyarray = parray # <<<<<<<<<<<<<< * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() * cdef cudaExtent extent = cudaExtent() */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_parray); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 20918, __pyx_L1_error) __pyx_v_cyarray = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":20919 * parray = int(cudaArray_t(array)) * cyarray = parray * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() # 
<<<<<<<<<<<<<< * cdef cudaExtent extent = cudaExtent() * cdef unsigned int flags = 0 */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20919, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_desc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":20920 * cyarray = parray * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() * cdef cudaExtent extent = cudaExtent() # <<<<<<<<<<<<<< * cdef unsigned int flags = 0 * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExtent); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExtent); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20920, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_extent = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":20921 * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() * cdef cudaExtent extent = cudaExtent() * cdef unsigned int flags = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaArrayGetInfo(desc._pvt_ptr, 
extent._pvt_ptr, &flags, cyarray) */ __pyx_v_flags = 0; /* "cuda/bindings/runtime.pyx":20922 * cdef cudaExtent extent = cudaExtent() * cdef unsigned int flags = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaArrayGetInfo(desc._pvt_ptr, extent._pvt_ptr, &flags, cyarray) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20923 * cdef unsigned int flags = 0 * with nogil: * err = cyruntime.cudaArrayGetInfo(desc._pvt_ptr, extent._pvt_ptr, &flags, cyarray) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None, None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaArrayGetInfo(((struct cudaChannelFormatDesc *)__pyx_v_desc->_pvt_ptr), ((struct cudaExtent *)__pyx_v_extent->_pvt_ptr), (&__pyx_v_flags), __pyx_v_cyarray); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20923, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":20922 * cdef cudaExtent extent = cudaExtent() * cdef unsigned int flags = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaArrayGetInfo(desc._pvt_ptr, extent._pvt_ptr, &flags, cyarray) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":20924 * with nogil: * err = cyruntime.cudaArrayGetInfo(desc._pvt_ptr, extent._pvt_ptr, &flags, cyarray) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None, None) * return (_dict_cudaError_t[err], desc, extent, flags) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20925 * err = cyruntime.cudaArrayGetInfo(desc._pvt_ptr, extent._pvt_ptr, &flags, cyarray) * if err != 
cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None, None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], desc, extent, flags) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 20925, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None) != (0)) __PYX_ERR(0, 20925, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 2, Py_None) != (0)) __PYX_ERR(0, 20925, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 3, Py_None) != (0)) __PYX_ERR(0, 20925, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20924 * with nogil: * err = cyruntime.cudaArrayGetInfo(desc._pvt_ptr, extent._pvt_ptr, &flags, cyarray) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None, None, None) * return (_dict_cudaError_t[err], desc, extent, flags) */ } /* "cuda/bindings/runtime.pyx":20926 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None, None, None) * return (_dict_cudaError_t[err], desc, extent, flags) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ 
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_From_unsigned_int(__pyx_v_flags); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 20926, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_desc); __Pyx_GIVEREF((PyObject *)__pyx_v_desc); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_desc)) != (0)) __PYX_ERR(0, 20926, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_extent); __Pyx_GIVEREF((PyObject *)__pyx_v_extent); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 2, ((PyObject *)__pyx_v_extent)) != (0)) __PYX_ERR(0, 20926, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3) != (0)) __PYX_ERR(0, 20926, __pyx_L1_error); __pyx_t_4 = 0; __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20882 * return (_dict_cudaError_t[err], free, total) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaArrayGetInfo(array): * """ Gets info about the specified cudaArray. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaArrayGetInfo", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_parray); __Pyx_XDECREF((PyObject *)__pyx_v_desc); __Pyx_XDECREF((PyObject *)__pyx_v_extent); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":20928 * return (_dict_cudaError_t[err], desc, extent, flags) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaArrayGetPlane(hArray, unsigned int planeIdx): * """ Gets a CUDA array plane from a CUDA array. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_227cudaArrayGetPlane(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_226cudaArrayGetPlane, "cudaArrayGetPlane(hArray, unsigned int planeIdx)\n\nGets a CUDA array plane from a CUDA array.\n\nReturns in `pPlaneArray` a CUDA array that represents a single format\nplane of the CUDA array `hArray`.\n\nIf `planeIdx` is greater than the maximum number of planes in this\narray or if the array does not have a multi-planar format e.g:\n:py:obj:`~.cudaChannelFormatKindNV12`, then\n:py:obj:`~.cudaErrorInvalidValue` is returned.\n\nNote that if the `hArray` has format\n:py:obj:`~.cudaChannelFormatKindNV12`, then passing in 0 for `planeIdx`\nreturns a CUDA array of the same size as `hArray` but with one 8-bit\nchannel and :py:obj:`~.cudaChannelFormatKindUnsigned` as its format\nkind. 
If 1 is passed for `planeIdx`, then the returned CUDA array has\nhalf the height and width of `hArray` with two 8-bit channels and\n:py:obj:`~.cudaChannelFormatKindUnsigned` as its format kind.\n\nParameters\n----------\nhArray : :py:obj:`~.cudaArray_t`\n CUDA array\nplaneIdx : unsigned int\n Plane index\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue` :py:obj:`~.cudaErrorInvalidResourceHandle`\npPlaneArray : :py:obj:`~.cudaArray_t`\n Returned CUDA array referenced by the `planeIdx`\n\nSee Also\n--------\n:py:obj:`~.cuArrayGetPlane`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_227cudaArrayGetPlane = {"cudaArrayGetPlane", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_227cudaArrayGetPlane, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_226cudaArrayGetPlane}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_227cudaArrayGetPlane(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_hArray = 0; unsigned int __pyx_v_planeIdx; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaArrayGetPlane (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hArray,&__pyx_mstate_global->__pyx_n_u_planeIdx,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) 
/* NOTE(review): Machine-generated Cython output (runtime.pyx) — regenerate rather than
 * hand-edit. Above: tail of the cudaArrayGetPlane docstring, its PyMethodDef entry, and
 * the start of the METH_FASTCALL wrapper __pyx_pw_..._227cudaArrayGetPlane. The wrapper
 * unpacks two arguments, `hArray` (object) and `planeIdx` (converted via
 * __Pyx_PyLong_As_unsigned_int), and forwards to the implementation. */
? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20928, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20928, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20928, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaArrayGetPlane", 0) < (0)) __PYX_ERR(0, 20928, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaArrayGetPlane", 1, 2, 2, i); __PYX_ERR(0, 20928, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20928, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20928, __pyx_L3_error) } __pyx_v_hArray = values[0]; __pyx_v_planeIdx = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_planeIdx == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20929, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaArrayGetPlane", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20928, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaArrayGetPlane", __pyx_clineno, __pyx_lineno, __pyx_filename); 
/* Implementation of cudaArrayGetPlane (runtime.pyx:20928-20979, quoted below). Coerces
 * `hArray` to a cudaArray_t handle (None -> 0; cudaArray_t -> int(hArray); else
 * int(cudaArray_t(hArray))), builds an empty cudaArray_t wrapper for the result, calls
 * cyruntime.cudaArrayGetPlane with the GIL released, and returns
 * (_dict_cudaError_t[err], None) on failure, else (_dict_cudaError_t[err], pPlaneArray). */
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_226cudaArrayGetPlane(__pyx_self, __pyx_v_hArray, __pyx_v_planeIdx); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_226cudaArrayGetPlane(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hArray, unsigned int __pyx_v_planeIdx) { cudaArray_t __pyx_v_cyhArray; PyObject *__pyx_v_phArray = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *__pyx_v_pPlaneArray = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaArrayGetPlane", 0); /* "cuda/bindings/runtime.pyx":20967 * """ * cdef cyruntime.cudaArray_t cyhArray * if hArray is None: # <<<<<<<<<<<<<< * phArray = 0 * elif isinstance(hArray, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_hArray == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20968 * cdef cyruntime.cudaArray_t cyhArray * if hArray is None: * phArray = 0 # <<<<<<<<<<<<<< * elif isinstance(hArray, (cudaArray_t,)): * phArray = int(hArray) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phArray = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":20967 * """ * cdef cyruntime.cudaArray_t cyhArray * if hArray is None: # <<<<<<<<<<<<<< * phArray = 0 * elif isinstance(hArray, (cudaArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20969 * if hArray is None: * phArray = 0 * elif isinstance(hArray, (cudaArray_t,)): # <<<<<<<<<<<<<< * phArray = 
int(hArray) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_hArray, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20970 * phArray = 0 * elif isinstance(hArray, (cudaArray_t,)): * phArray = int(hArray) # <<<<<<<<<<<<<< * else: * phArray = int(cudaArray_t(hArray)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_hArray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20970, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_phArray = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":20969 * if hArray is None: * phArray = 0 * elif isinstance(hArray, (cudaArray_t,)): # <<<<<<<<<<<<<< * phArray = int(hArray) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":20972 * phArray = int(hArray) * else: * phArray = int(cudaArray_t(hArray)) # <<<<<<<<<<<<<< * cyhArray = phArray * cdef cudaArray_t pPlaneArray = cudaArray_t() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_hArray}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20972, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20972, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_phArray = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":20973 * else: * phArray = int(cudaArray_t(hArray)) * cyhArray = phArray # <<<<<<<<<<<<<< * cdef cudaArray_t pPlaneArray = cudaArray_t() * with nogil: */ __pyx_t_6 = 
__Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phArray); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 20973, __pyx_L1_error) __pyx_v_cyhArray = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":20974 * phArray = int(cudaArray_t(hArray)) * cyhArray = phArray * cdef cudaArray_t pPlaneArray = cudaArray_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaArrayGetPlane(pPlaneArray._pvt_ptr, cyhArray, planeIdx) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20974, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_pPlaneArray = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":20975 * cyhArray = phArray * cdef cudaArray_t pPlaneArray = cudaArray_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaArrayGetPlane(pPlaneArray._pvt_ptr, cyhArray, planeIdx) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":20976 * cdef cudaArray_t pPlaneArray = cudaArray_t() * with nogil: * err = cyruntime.cudaArrayGetPlane(pPlaneArray._pvt_ptr, cyhArray, planeIdx) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaArrayGetPlane(((cudaArray_t *)__pyx_v_pPlaneArray->_pvt_ptr), __pyx_v_cyhArray, __pyx_v_planeIdx); if 
(unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 20976, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":20975 * cyhArray = phArray * cdef cudaArray_t pPlaneArray = cudaArray_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaArrayGetPlane(pPlaneArray._pvt_ptr, cyhArray, planeIdx) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":20977 * with nogil: * err = cyruntime.cudaArrayGetPlane(pPlaneArray._pvt_ptr, cyhArray, planeIdx) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pPlaneArray) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":20978 * err = cyruntime.cudaArrayGetPlane(pPlaneArray._pvt_ptr, cyhArray, planeIdx) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pPlaneArray) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20978, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 20978, 
__pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 20978, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20977 * with nogil: * err = cyruntime.cudaArrayGetPlane(pPlaneArray._pvt_ptr, cyhArray, planeIdx) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pPlaneArray) */ } /* "cuda/bindings/runtime.pyx":20979 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pPlaneArray) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20979, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20979, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20979, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20979, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 20979, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pPlaneArray); __Pyx_GIVEREF((PyObject *)__pyx_v_pPlaneArray); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pPlaneArray)) != (0)) __PYX_ERR(0, 20979, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20928 * return (_dict_cudaError_t[err], desc, extent, flags) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaArrayGetPlane(hArray, unsigned int planeIdx): * 
The returned value in\n:py:obj:`~.cudaArrayMemoryRequirements.alignment` represents the\nalignment necessary for mapping the CUDA array.\n\nParameters\n----------\narray : :py:obj:`~.cudaArray_t`\n CUDA array to get the memory requirements of\ndevice : int\n Device to get the memory requirements for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess` :py:obj:`~.cudaErrorInvalidValue`\nmemoryRequirements : :py:obj:`~.cudaArrayMemoryRequirements`\n Pointer to :py:obj:`~.cudaArrayMemoryRequirements`\n\nSee Also\n--------\n:py:obj:`~.cudaMipmappedArrayGetMemoryRequirements`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_229cudaArrayGetMemoryRequirements = {"cudaArrayGetMemoryRequirements", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_229cudaArrayGetMemoryRequirements, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_228cudaArrayGetMemoryRequirements}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_229cudaArrayGetMemoryRequirements(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_array = 0; int __pyx_v_device; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaArrayGetMemoryRequirements (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = 
/* NOTE(review): Machine-generated Cython output (runtime.pyx) — regenerate rather than
 * hand-edit. Above: tail of the cudaArrayGetMemoryRequirements docstring, its
 * PyMethodDef entry, and the METH_FASTCALL wrapper __pyx_pw_..._229, which unpacks
 * `array` (object) and `device` (converted via __Pyx_PyLong_As_int) and forwards to the
 * implementation. The implementation (__pyx_pf_..._228) begins near the end of this
 * region and continues beyond it. */
{&__pyx_mstate_global->__pyx_n_u_array_2,&__pyx_mstate_global->__pyx_n_u_device_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 20981, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20981, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20981, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaArrayGetMemoryRequirements", 0) < (0)) __PYX_ERR(0, 20981, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaArrayGetMemoryRequirements", 1, 2, 2, i); __PYX_ERR(0, 20981, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 20981, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 20981, __pyx_L3_error) } __pyx_v_array = values[0]; __pyx_v_device = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 20982, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaArrayGetMemoryRequirements", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 20981, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); 
++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaArrayGetMemoryRequirements", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_228cudaArrayGetMemoryRequirements(__pyx_self, __pyx_v_array, __pyx_v_device); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_228cudaArrayGetMemoryRequirements(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_array, int __pyx_v_device) { cudaArray_t __pyx_v_cyarray; PyObject *__pyx_v_parray = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaArrayMemoryRequirements *__pyx_v_memoryRequirements = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaArrayGetMemoryRequirements", 0); /* "cuda/bindings/runtime.pyx":21014 * """ * cdef cyruntime.cudaArray_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif isinstance(array, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_array == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21015 * cdef cyruntime.cudaArray_t cyarray * if array is None: * parray = 0 # <<<<<<<<<<<<<< * elif isinstance(array, (cudaArray_t,)): * parray = int(array) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_parray = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21014 * """ * cdef cyruntime.cudaArray_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif 
isinstance(array, (cudaArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21016 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_array, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21017 * parray = 0 * elif isinstance(array, (cudaArray_t,)): * parray = int(array) # <<<<<<<<<<<<<< * else: * parray = int(cudaArray_t(array)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21017, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_parray = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":21016 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21019 * parray = int(array) * else: * parray = int(cudaArray_t(array)) # <<<<<<<<<<<<<< * cyarray = parray * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_array}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21019, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21019, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_parray = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } 
__pyx_L3:; /* "cuda/bindings/runtime.pyx":21020 * else: * parray = int(cudaArray_t(array)) * cyarray = parray # <<<<<<<<<<<<<< * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_parray); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21020, __pyx_L1_error) __pyx_v_cyarray = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":21021 * parray = int(cudaArray_t(array)) * cyarray = parray * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cyarray, device) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArrayMemoryRequirements); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArrayMemoryRequirements); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21021, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_memoryRequirements = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaArrayMemoryRequirements *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":21022 * cyarray = parray * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cyarray, device) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* 
"cuda/bindings/runtime.pyx":21023 * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() * with nogil: * err = cyruntime.cudaArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cyarray, device) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaArrayGetMemoryRequirements(((struct cudaArrayMemoryRequirements *)__pyx_v_memoryRequirements->_pvt_ptr), __pyx_v_cyarray, __pyx_v_device); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21023, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":21022 * cyarray = parray * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cyarray, device) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":21024 * with nogil: * err = cyruntime.cudaArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cyarray, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memoryRequirements) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21025 * err = cyruntime.cudaArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cyarray, device) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], memoryRequirements) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21025, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21025, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 21025, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 21025, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21024 * with nogil: * err = cyruntime.cudaArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cyarray, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memoryRequirements) */ } /* "cuda/bindings/runtime.pyx":21026 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memoryRequirements) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21026, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21026, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21026, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 
21026, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 21026, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_memoryRequirements); __Pyx_GIVEREF((PyObject *)__pyx_v_memoryRequirements); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_memoryRequirements)) != (0)) __PYX_ERR(0, 21026, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":20981 * return (_dict_cudaError_t[err], pPlaneArray) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaArrayGetMemoryRequirements(array, int device): * """ Returns the memory requirements of a CUDA array. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaArrayGetMemoryRequirements", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_parray); __Pyx_XDECREF((PyObject *)__pyx_v_memoryRequirements); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21028 * return (_dict_cudaError_t[err], memoryRequirements) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMipmappedArrayGetMemoryRequirements(mipmap, int device): * """ Returns the memory requirements of a CUDA mipmapped array. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_231cudaMipmappedArrayGetMemoryRequirements(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_230cudaMipmappedArrayGetMemoryRequirements, "cudaMipmappedArrayGetMemoryRequirements(mipmap, int device)\n\nReturns the memory requirements of a CUDA mipmapped array.\n\nReturns the memory requirements of a CUDA mipmapped array in\n`memoryRequirements` If the CUDA mipmapped array is not allocated with\nflag :py:obj:`~.cudaArrayDeferredMapping`\n:py:obj:`~.cudaErrorInvalidValue` will be returned.\n\nThe returned value in :py:obj:`~.cudaArrayMemoryRequirements.size`\nrepresents the total size of the CUDA mipmapped array. The returned\nvalue in :py:obj:`~.cudaArrayMemoryRequirements.alignment` represents\nthe alignment necessary for mapping the CUDA mipmapped array.\n\nParameters\n----------\nmipmap : :py:obj:`~.cudaMipmappedArray_t`\n CUDA mipmapped array to get the memory requirements of\ndevice : int\n Device to get the memory requirements for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess` :py:obj:`~.cudaErrorInvalidValue`\nmemoryRequirements : :py:obj:`~.cudaArrayMemoryRequirements`\n Pointer to :py:obj:`~.cudaArrayMemoryRequirements`\n\nSee Also\n--------\n:py:obj:`~.cudaArrayGetMemoryRequirements`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_231cudaMipmappedArrayGetMemoryRequirements = {"cudaMipmappedArrayGetMemoryRequirements", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_231cudaMipmappedArrayGetMemoryRequirements, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_230cudaMipmappedArrayGetMemoryRequirements}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_231cudaMipmappedArrayGetMemoryRequirements(PyObject 
*__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_mipmap = 0; int __pyx_v_device; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMipmappedArrayGetMemoryRequirements (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_mipmap_2,&__pyx_mstate_global->__pyx_n_u_device_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21028, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21028, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21028, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMipmappedArrayGetMemoryRequirements", 0) < (0)) __PYX_ERR(0, 21028, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMipmappedArrayGetMemoryRequirements", 1, 2, 2, i); __PYX_ERR(0, 21028, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21028, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21028, __pyx_L3_error) } __pyx_v_mipmap = values[0]; __pyx_v_device = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21029, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMipmappedArrayGetMemoryRequirements", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 21028, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
__Pyx_AddTraceback("cuda.bindings.runtime.cudaMipmappedArrayGetMemoryRequirements", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_230cudaMipmappedArrayGetMemoryRequirements(__pyx_self, __pyx_v_mipmap, __pyx_v_device); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_230cudaMipmappedArrayGetMemoryRequirements(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mipmap, int __pyx_v_device) { cudaMipmappedArray_t __pyx_v_cymipmap; PyObject *__pyx_v_pmipmap = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaArrayMemoryRequirements *__pyx_v_memoryRequirements = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMipmappedArrayGetMemoryRequirements", 0); /* "cuda/bindings/runtime.pyx":21061 * """ * cdef cyruntime.cudaMipmappedArray_t cymipmap * if mipmap is None: # <<<<<<<<<<<<<< * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): */ __pyx_t_1 = (__pyx_v_mipmap == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21062 * cdef cyruntime.cudaMipmappedArray_t cymipmap * if mipmap is None: * pmipmap = 0 # <<<<<<<<<<<<<< * elif isinstance(mipmap, (cudaMipmappedArray_t,)): * pmipmap = int(mipmap) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmipmap = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21061 * """ * cdef cyruntime.cudaMipmappedArray_t cymipmap * if 
mipmap is None: # <<<<<<<<<<<<<< * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21063 * if mipmap is None: * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): # <<<<<<<<<<<<<< * pmipmap = int(mipmap) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_mipmap, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21064 * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): * pmipmap = int(mipmap) # <<<<<<<<<<<<<< * else: * pmipmap = int(cudaMipmappedArray_t(mipmap)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_mipmap); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21064, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pmipmap = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":21063 * if mipmap is None: * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): # <<<<<<<<<<<<<< * pmipmap = int(mipmap) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21066 * pmipmap = int(mipmap) * else: * pmipmap = int(cudaMipmappedArray_t(mipmap)) # <<<<<<<<<<<<<< * cymipmap = pmipmap * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_mipmap}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21066, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 
21066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pmipmap = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21067 * else: * pmipmap = int(cudaMipmappedArray_t(mipmap)) * cymipmap = pmipmap # <<<<<<<<<<<<<< * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmipmap); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21067, __pyx_L1_error) __pyx_v_cymipmap = ((cudaMipmappedArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":21068 * pmipmap = int(cudaMipmappedArray_t(mipmap)) * cymipmap = pmipmap * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMipmappedArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cymipmap, device) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArrayMemoryRequirements); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArrayMemoryRequirements); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21068, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_memoryRequirements = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaArrayMemoryRequirements *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":21069 * cymipmap = pmipmap * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() * with nogil: # <<<<<<<<<<<<<< * err = 
cyruntime.cudaMipmappedArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cymipmap, device) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21070 * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() * with nogil: * err = cyruntime.cudaMipmappedArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cymipmap, device) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMipmappedArrayGetMemoryRequirements(((struct cudaArrayMemoryRequirements *)__pyx_v_memoryRequirements->_pvt_ptr), __pyx_v_cymipmap, __pyx_v_device); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21070, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":21069 * cymipmap = pmipmap * cdef cudaArrayMemoryRequirements memoryRequirements = cudaArrayMemoryRequirements() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMipmappedArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cymipmap, device) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":21071 * with nogil: * err = cyruntime.cudaMipmappedArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cymipmap, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memoryRequirements) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21072 * err = cyruntime.cudaMipmappedArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cymipmap, device) * if err != cyruntime.cudaSuccess: * return 
(_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], memoryRequirements) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21072, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21072, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21072, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21072, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 21072, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 21072, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21071 * with nogil: * err = cyruntime.cudaMipmappedArrayGetMemoryRequirements(memoryRequirements._pvt_ptr, cymipmap, device) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memoryRequirements) */ } /* "cuda/bindings/runtime.pyx":21073 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memoryRequirements) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21073, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21073, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
__pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21073, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21073, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 21073, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_memoryRequirements); __Pyx_GIVEREF((PyObject *)__pyx_v_memoryRequirements); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_memoryRequirements)) != (0)) __PYX_ERR(0, 21073, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21028 * return (_dict_cudaError_t[err], memoryRequirements) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMipmappedArrayGetMemoryRequirements(mipmap, int device): * """ Returns the memory requirements of a CUDA mipmapped array. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMipmappedArrayGetMemoryRequirements", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmipmap); __Pyx_XDECREF((PyObject *)__pyx_v_memoryRequirements); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21075 * return (_dict_cudaError_t[err], memoryRequirements) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaArrayGetSparseProperties(array): * """ Returns the layout properties of a sparse CUDA array. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_233cudaArrayGetSparseProperties(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_232cudaArrayGetSparseProperties, "cudaArrayGetSparseProperties(array)\n\nReturns the layout properties of a sparse CUDA array.\n\nReturns the layout properties of a sparse CUDA array in\n`sparseProperties`. If the CUDA array is not allocated with flag\n:py:obj:`~.cudaArraySparse` :py:obj:`~.cudaErrorInvalidValue` will be\nreturned.\n\nIf the returned value in :py:obj:`~.cudaArraySparseProperties.flags`\ncontains :py:obj:`~.cudaArraySparsePropertiesSingleMipTail`, then\n:py:obj:`~.cudaArraySparseProperties.miptailSize` represents the total\nsize of the array. Otherwise, it will be zero. Also, the returned value\nin :py:obj:`~.cudaArraySparseProperties.miptailFirstLevel` is always\nzero. Note that the `array` must have been allocated using\n:py:obj:`~.cudaMallocArray` or :py:obj:`~.cudaMalloc3DArray`. For CUDA\narrays obtained using :py:obj:`~.cudaMipmappedArrayGetLevel`,\n:py:obj:`~.cudaErrorInvalidValue` will be returned. 
Instead,\n:py:obj:`~.cudaMipmappedArrayGetSparseProperties` must be used to\nobtain the sparse properties of the entire CUDA mipmapped array to\nwhich `array` belongs to.\n\nParameters\n----------\narray : :py:obj:`~.cudaArray_t`\n The CUDA array to get the sparse properties of\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess` :py:obj:`~.cudaErrorInvalidValue`\nsparseProperties : :py:obj:`~.cudaArraySparseProperties`\n Pointer to return the :py:obj:`~.cudaArraySparseProperties`\n\nSee Also\n--------\n:py:obj:`~.cudaMipmappedArrayGetSparseProperties`, :py:obj:`~.cuMemMapArrayAsync`");
/* NOTE(review): Cython-generated binding code (embedded markers reference
 * "cuda/bindings/runtime.pyx"). Do not hand-edit; change the .pyx and
 * regenerate. Below: method-table entry and METH_FASTCALL wrapper for
 * cudaArrayGetSparseProperties(array) — unpacks exactly one
 * positional-or-keyword argument, then delegates to the _232 impl. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_233cudaArrayGetSparseProperties = {"cudaArrayGetSparseProperties", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_233cudaArrayGetSparseProperties, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_232cudaArrayGetSparseProperties}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_233cudaArrayGetSparseProperties(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_array = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaArrayGetSparseProperties (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* Accept the single argument positionally or as keyword "array"; on any
 * parse failure jump to __pyx_L3_error, which drops the held refs. */
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_array_2,0}; const Py_ssize_t __pyx_kwds_len = 
(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21075, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21075, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaArrayGetSparseProperties", 0) < (0)) __PYX_ERR(0, 21075, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaArrayGetSparseProperties", 1, 1, 1, i); __PYX_ERR(0, 21075, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21075, __pyx_L3_error) } __pyx_v_array = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaArrayGetSparseProperties", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21075, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaArrayGetSparseProperties", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_232cudaArrayGetSparseProperties(__pyx_self, __pyx_v_array); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): implementation of cudaArrayGetSparseProperties. Coerces the
 * Python `array` handle to an integer pointer value (None -> 0, cudaArray_t
 * instance -> int(array), otherwise wrapped via cudaArray_t(array)), builds a
 * cudaArraySparseProperties wrapper object, calls the cyruntime entry point
 * with the GIL released, and returns the 2-tuple
 * (_dict_cudaError_t[err], sparseProperties-or-None). Generated code —
 * statement order and refcounting are load-bearing. */
static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_232cudaArrayGetSparseProperties(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_array) { cudaArray_t __pyx_v_cyarray; PyObject *__pyx_v_parray = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaArraySparseProperties *__pyx_v_sparseProperties = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaArrayGetSparseProperties", 0); /* "cuda/bindings/runtime.pyx":21114 * """ * cdef cyruntime.cudaArray_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif isinstance(array, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_array == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21115 * cdef cyruntime.cudaArray_t cyarray * if array is None: * parray = 0 # <<<<<<<<<<<<<< * elif isinstance(array, (cudaArray_t,)): * parray = int(array) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_parray = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21114 * """ * cdef cyruntime.cudaArray_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif isinstance(array, (cudaArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21116 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_array, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21117 * parray = 0 * elif isinstance(array, (cudaArray_t,)): * parray = int(array) # <<<<<<<<<<<<<< * else: * parray = int(cudaArray_t(array)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21117, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_2); __pyx_v_parray = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":21116 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21119 * parray = int(array) * else: * parray = int(cudaArray_t(array)) # <<<<<<<<<<<<<< * cyarray = parray * cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_array}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21119, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21119, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_parray = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21120 * else: * parray = int(cudaArray_t(array)) * cyarray = parray # <<<<<<<<<<<<<< * cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_parray); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21120, __pyx_L1_error) __pyx_v_cyarray = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":21121 * parray = int(cudaArray_t(array)) * cyarray = parray * cdef cudaArraySparseProperties sparseProperties = 
cudaArraySparseProperties() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaArrayGetSparseProperties(sparseProperties._pvt_ptr, cyarray) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArraySparseProperties); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArraySparseProperties); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21121, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_sparseProperties = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaArraySparseProperties *)__pyx_t_4); __pyx_t_4 = 0;
/* NOTE(review): GIL released around the runtime call; sparseProperties->_pvt_ptr
 * backs the C struct that the runtime fills in. */
/* "cuda/bindings/runtime.pyx":21122 * cyarray = parray * cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaArrayGetSparseProperties(sparseProperties._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21123 * cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() * with nogil: * err = cyruntime.cudaArrayGetSparseProperties(sparseProperties._pvt_ptr, cyarray) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaArrayGetSparseProperties(((struct cudaArraySparseProperties *)__pyx_v_sparseProperties->_pvt_ptr), __pyx_v_cyarray); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21123, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":21122 * cyarray = parray * cdef 
cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaArrayGetSparseProperties(sparseProperties._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":21124 * with nogil: * err = cyruntime.cudaArrayGetSparseProperties(sparseProperties._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], sparseProperties) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21125 * err = cyruntime.cudaArrayGetSparseProperties(sparseProperties._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], sparseProperties) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21125, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 21125, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 21125, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = 
__pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21124 * with nogil: * err = cyruntime.cudaArrayGetSparseProperties(sparseProperties._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], sparseProperties) */ } /* "cuda/bindings/runtime.pyx":21126 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], sparseProperties) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21126, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 21126, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_sparseProperties); __Pyx_GIVEREF((PyObject *)__pyx_v_sparseProperties); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_sparseProperties)) != (0)) __PYX_ERR(0, 21126, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21075 * return (_dict_cudaError_t[err], memoryRequirements) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaArrayGetSparseProperties(array): * """ Returns the layout properties of a sparse CUDA array. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaArrayGetSparseProperties", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_parray); __Pyx_XDECREF((PyObject *)__pyx_v_sparseProperties); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21128 * return (_dict_cudaError_t[err], sparseProperties) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMipmappedArrayGetSparseProperties(mipmap): * """ Returns the layout properties of a sparse CUDA mipmapped array. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_235cudaMipmappedArrayGetSparseProperties(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_234cudaMipmappedArrayGetSparseProperties, "cudaMipmappedArrayGetSparseProperties(mipmap)\n\nReturns the layout properties of a sparse CUDA mipmapped array.\n\nReturns the sparse array layout properties in `sparseProperties`. If\nthe CUDA mipmapped array is not allocated with flag\n:py:obj:`~.cudaArraySparse` :py:obj:`~.cudaErrorInvalidValue` will be\nreturned.\n\nFor non-layered CUDA mipmapped arrays,\n:py:obj:`~.cudaArraySparseProperties.miptailSize` returns the size of\nthe mip tail region. The mip tail region includes all mip levels whose\nwidth, height or depth is less than that of the tile. For layered CUDA\nmipmapped arrays, if :py:obj:`~.cudaArraySparseProperties.flags`\ncontains :py:obj:`~.cudaArraySparsePropertiesSingleMipTail`, then\n:py:obj:`~.cudaArraySparseProperties.miptailSize` specifies the size of\nthe mip tail of all layers combined. 
Otherwise,\n:py:obj:`~.cudaArraySparseProperties.miptailSize` specifies mip tail\nsize per layer. The returned value of\n:py:obj:`~.cudaArraySparseProperties.miptailFirstLevel` is valid only\nif :py:obj:`~.cudaArraySparseProperties.miptailSize` is non-zero.\n\nParameters\n----------\nmipmap : :py:obj:`~.cudaMipmappedArray_t`\n The CUDA mipmapped array to get the sparse properties of\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess` :py:obj:`~.cudaErrorInvalidValue`\nsparseProperties : :py:obj:`~.cudaArraySparseProperties`\n Pointer to return :py:obj:`~.cudaArraySparseProperties`\n\nSee Also\n--------\n:py:obj:`~.cudaArrayGetSparseProperties`, :py:obj:`~.cuMemMapArrayAsync`");
/* NOTE(review): generated method-table entry + METH_FASTCALL wrapper for
 * cudaMipmappedArrayGetSparseProperties(mipmap); same one-argument unpacking
 * scheme as the wrapper above, delegating to the _234 implementation. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_235cudaMipmappedArrayGetSparseProperties = {"cudaMipmappedArrayGetSparseProperties", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_235cudaMipmappedArrayGetSparseProperties, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_234cudaMipmappedArrayGetSparseProperties}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_235cudaMipmappedArrayGetSparseProperties(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_mipmap = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMipmappedArrayGetSparseProperties (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_mipmap_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21128, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21128, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMipmappedArrayGetSparseProperties", 0) < (0)) __PYX_ERR(0, 21128, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMipmappedArrayGetSparseProperties", 1, 1, 1, i); __PYX_ERR(0, 21128, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21128, __pyx_L3_error) } __pyx_v_mipmap = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMipmappedArrayGetSparseProperties", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 21128, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMipmappedArrayGetSparseProperties", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_234cudaMipmappedArrayGetSparseProperties(__pyx_self, __pyx_v_mipmap); /* function exit code */ 
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): implementation of cudaMipmappedArrayGetSparseProperties —
 * mirrors the _232 impl above: handle coercion, wrapper allocation, nogil
 * runtime call, (_dict_cudaError_t[err], props-or-None) result tuple. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_234cudaMipmappedArrayGetSparseProperties(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mipmap) { cudaMipmappedArray_t __pyx_v_cymipmap; PyObject *__pyx_v_pmipmap = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaArraySparseProperties *__pyx_v_sparseProperties = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMipmappedArrayGetSparseProperties", 0); /* "cuda/bindings/runtime.pyx":21167 * """ * cdef cyruntime.cudaMipmappedArray_t cymipmap * if mipmap is None: # <<<<<<<<<<<<<< * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): */ __pyx_t_1 = (__pyx_v_mipmap == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21168 * cdef cyruntime.cudaMipmappedArray_t cymipmap * if mipmap is None: * pmipmap = 0 # <<<<<<<<<<<<<< * elif isinstance(mipmap, (cudaMipmappedArray_t,)): * pmipmap = int(mipmap) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmipmap = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21167 * """ * cdef cyruntime.cudaMipmappedArray_t cymipmap * if mipmap is None: # <<<<<<<<<<<<<< * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21169 * if mipmap is None: * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): # <<<<<<<<<<<<<< * pmipmap = int(mipmap) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_mipmap, 
__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21170 * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): * pmipmap = int(mipmap) # <<<<<<<<<<<<<< * else: * pmipmap = int(cudaMipmappedArray_t(mipmap)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_mipmap); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21170, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pmipmap = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":21169 * if mipmap is None: * pmipmap = 0 * elif isinstance(mipmap, (cudaMipmappedArray_t,)): # <<<<<<<<<<<<<< * pmipmap = int(mipmap) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21172 * pmipmap = int(mipmap) * else: * pmipmap = int(cudaMipmappedArray_t(mipmap)) # <<<<<<<<<<<<<< * cymipmap = pmipmap * cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_mipmap}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21172, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21172, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pmipmap = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21173 * else: * pmipmap = int(cudaMipmappedArray_t(mipmap)) * cymipmap = pmipmap # <<<<<<<<<<<<<< * cdef cudaArraySparseProperties 
sparseProperties = cudaArraySparseProperties() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmipmap); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21173, __pyx_L1_error) __pyx_v_cymipmap = ((cudaMipmappedArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":21174 * pmipmap = int(cudaMipmappedArray_t(mipmap)) * cymipmap = pmipmap * cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMipmappedArrayGetSparseProperties(sparseProperties._pvt_ptr, cymipmap) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArraySparseProperties); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArraySparseProperties); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21174, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_sparseProperties = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaArraySparseProperties *)__pyx_t_4); __pyx_t_4 = 0;
/* NOTE(review): GIL released around the runtime call; sparseProperties->_pvt_ptr
 * backs the C struct the runtime fills in. */
/* "cuda/bindings/runtime.pyx":21175 * cymipmap = pmipmap * cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMipmappedArrayGetSparseProperties(sparseProperties._pvt_ptr, cymipmap) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21176 * cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() * with nogil: * err = 
cyruntime.cudaMipmappedArrayGetSparseProperties(sparseProperties._pvt_ptr, cymipmap) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMipmappedArrayGetSparseProperties(((struct cudaArraySparseProperties *)__pyx_v_sparseProperties->_pvt_ptr), __pyx_v_cymipmap); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21176, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":21175 * cymipmap = pmipmap * cdef cudaArraySparseProperties sparseProperties = cudaArraySparseProperties() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMipmappedArrayGetSparseProperties(sparseProperties._pvt_ptr, cymipmap) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":21177 * with nogil: * err = cyruntime.cudaMipmappedArrayGetSparseProperties(sparseProperties._pvt_ptr, cymipmap) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], sparseProperties) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21178 * err = cyruntime.cudaMipmappedArrayGetSparseProperties(sparseProperties._pvt_ptr, cymipmap) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], sparseProperties) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 
= __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21178, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 21178, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 21178, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21177 * with nogil: * err = cyruntime.cudaMipmappedArrayGetSparseProperties(sparseProperties._pvt_ptr, cymipmap) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], sparseProperties) */ } /* "cuda/bindings/runtime.pyx":21179 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], sparseProperties) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21179, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 21179, __pyx_L1_error); 
__Pyx_INCREF((PyObject *)__pyx_v_sparseProperties); __Pyx_GIVEREF((PyObject *)__pyx_v_sparseProperties); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_sparseProperties)) != (0)) __PYX_ERR(0, 21179, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21128 * return (_dict_cudaError_t[err], sparseProperties) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMipmappedArrayGetSparseProperties(mipmap): * """ Returns the layout properties of a sparse CUDA mipmapped array. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMipmappedArrayGetSparseProperties", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmipmap); __Pyx_XDECREF((PyObject *)__pyx_v_sparseProperties); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21181 * return (_dict_cudaError_t[err], sparseProperties) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy(dst, src, size_t count, kind not None : cudaMemcpyKind): * """ Copies data between host and device. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_237cudaMemcpy(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_236cudaMemcpy, "cudaMemcpy(dst, src, size_t count, kind: cudaMemcpyKind)\n\nCopies data between host and device.\n\n Copies `count` bytes from the memory area pointed to by `src` to the\n memory area pointed to by `dst`, where `kind` specifies the direction\n of the copy, and must be one of :py:obj:`~.cudaMemcpyHostToHost`,\n :py:obj:`~.cudaMemcpyHostToDevice`, :py:obj:`~.cudaMemcpyDeviceToHost`,\n :py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\n Passing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\n type of transfer is inferred from the pointer values. However,\n :py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\n unified virtual addressing. 
Calling :py:obj:`~.cudaMemcpy()` with dst\n and src pointers that do not match the direction of the copy results in\n an undefined behavior.\n\n \note_sync\n\n Parameters\n ----------\n dst : Any\n Destination memory address\n src : Any\n Source memory address\n count : size_t\n Size in bytes to copy\n kind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer\n\n Returns\n -------\n cudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\n See Also\n --------\n :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpy`\n "); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_237cudaMemcpy = {"cudaMemcpy", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_237cudaMemcpy, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_236cudaMemcpy}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_237cudaMemcpy(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; PyObject *__pyx_v_src = 0; size_t __pyx_v_count; PyObject *__pyx_v_kind = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("cudaMemcpy (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_kind_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21181, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21181, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21181, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21181, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21181, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy", 0) < (0)) __PYX_ERR(0, 21181, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy", 1, 4, 4, i); __PYX_ERR(0, 21181, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if 
(!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21181, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21181, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21181, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21181, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_src = values[1]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21182, __pyx_L3_error) __pyx_v_kind = values[3]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpy", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 21181, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 21182, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_236cudaMemcpy(__pyx_self, __pyx_v_dst, __pyx_v_src, __pyx_v_count, __pyx_v_kind); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; 
/* [doc] Cython-generated code (from cuda/bindings/runtime.pyx) -- edit the .pyx,
 * not this file. Below: (1) the tail of the cudaMemcpy Python wrapper (decref of
 * collected argument refs, RefNanny teardown), then (2) the cudaMemcpy
 * implementation __pyx_pf_..._236cudaMemcpy. The implementation wraps `dst` and
 * `src` in _HelperInputVoidPtr, reads each helper's `.cptr` attribute via
 * __Pyx_PyLong_As_unsigned_PY_LONG_LONG and casts it to void*, converts
 * `kind.value` to enum cudaMemcpyKind, releases the GIL (Py_UNBLOCK_THREADS)
 * around the cyruntime.cudaMemcpy call, and returns the 1-tuple
 * (_dict_cudaError_t[err],). Error paths funnel through __pyx_L1_error, which
 * decrefs the live temporaries and records a traceback. */
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_236cudaMemcpy(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src, size_t __pyx_v_count, PyObject *__pyx_v_kind) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; enum cudaMemcpyKind __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy", 0); /* "cuda/bindings/runtime.pyx":21219 * :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyDtoH`, :py:obj:`~.cuMemcpyHtoD`, :py:obj:`~.cuMemcpyDtoD`, :py:obj:`~.cuMemcpy` * """ * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_dst}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | 
(__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21219, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":21220 * """ * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21220, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":21221 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_src}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21221, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr 
*)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":21222 * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21222, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":21223 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy(cydst_ptr, cysrc_ptr, count, cykind) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21223, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cykind = __pyx_t_6; /* "cuda/bindings/runtime.pyx":21224 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy(cydst_ptr, cysrc_ptr, count, cykind) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21225 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpy(cydst_ptr, cysrc_ptr, count, cykind) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 
= __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy(__pyx_v_cydst_ptr, __pyx_v_cysrc_ptr, __pyx_v_count, __pyx_v_cykind); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21225, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":21224 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy(cydst_ptr, cysrc_ptr, count, cykind) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":21226 * with nogil: * err = cyruntime.cudaMemcpy(cydst_ptr, cysrc_ptr, count, cykind) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21226, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21181 * return (_dict_cudaError_t[err], sparseProperties) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy(dst, src, size_t count, kind 
not None : cudaMemcpyKind): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21228 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyPeer(dst, int dstDevice, src, int srcDevice, size_t count): * """ Copies memory between two devices. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_239cudaMemcpyPeer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_238cudaMemcpyPeer, "cudaMemcpyPeer(dst, int dstDevice, src, int srcDevice, size_t count)\n\nCopies memory between two devices.\n\nCopies memory from one device to memory on another device. `dst` is the\nbase device pointer of the destination memory and `dstDevice` is the\ndestination device. `src` is the base device pointer of the source\nmemory and `srcDevice` is the source device. 
`count` specifies the\nnumber of bytes to copy.\n\nNote that this function is asynchronous with respect to the host, but\nserialized with respect all pending and future asynchronous work in to\nthe current device, `srcDevice`, and `dstDevice` (use\n:py:obj:`~.cudaMemcpyPeerAsync` to avoid this synchronization).\n\nParameters\n----------\ndst : Any\n    Destination device pointer\ndstDevice : int\n    Destination device\nsrc : Any\n    Source device pointer\nsrcDevice : int\n    Source device\ncount : size_t\n    Size of memory copy in bytes\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyPeerAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cuMemcpyPeer`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_239cudaMemcpyPeer = {"cudaMemcpyPeer", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_239cudaMemcpyPeer, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_238cudaMemcpyPeer}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_239cudaMemcpyPeer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; int __pyx_v_dstDevice; PyObject *__pyx_v_src = 0; int __pyx_v_srcDevice; size_t __pyx_v_count; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[5] = {0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpyPeer (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if 
/* [doc] Cython-generated cudaMemcpyPeer entry point: docstring tail above, then
 * the PyMethodDef table entry and the METH_FASTCALL|METH_KEYWORDS wrapper. The
 * wrapper unpacks exactly five arguments (dst, dstDevice, src, srcDevice,
 * count); dstDevice/srcDevice go through __Pyx_PyLong_As_int and count through
 * __Pyx_PyLong_As_size_t. Any failure jumps to __pyx_L3_error, which decrefs the
 * collected argument refs and records a traceback before returning NULL.
 * NOTE(review): in `if (unlikely(__pyx_kwds_len) < 0)` the parenthesis wraps the
 * value rather than the comparison; with the usual __builtin_expect definition
 * of unlikely the test is always false -- upstream Cython codegen quirk, confirm
 * against the Cython version used to generate this file. Edit the .pyx, not
 * this file. */
(unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_dstDevice_2,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_srcDevice_2,&__pyx_mstate_global->__pyx_n_u_count,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21228, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21228, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21228, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21228, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21228, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21228, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpyPeer", 0) < (0)) __PYX_ERR(0, 21228, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 5; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpyPeer", 1, 5, 5, i); __PYX_ERR(0, 21228, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 5)) { goto __pyx_L5_argtuple_error; } else { values[0] = 
__Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21228, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21228, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21228, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21228, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21228, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_dstDevice = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_dstDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21229, __pyx_L3_error) __pyx_v_src = values[2]; __pyx_v_srcDevice = __Pyx_PyLong_As_int(values[3]); if (unlikely((__pyx_v_srcDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21229, __pyx_L3_error) __pyx_v_count = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21229, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpyPeer", 1, 5, 5, __pyx_nargs); __PYX_ERR(0, 21228, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyPeer", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_238cudaMemcpyPeer(__pyx_self, __pyx_v_dst, __pyx_v_dstDevice, __pyx_v_src, __pyx_v_srcDevice, __pyx_v_count); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
/* [doc] End of the cudaMemcpyPeer wrapper's cleanup loop, then the
 * implementation __pyx_pf_..._238cudaMemcpyPeer (Cython-generated; edit the
 * .pyx, not this file). It wraps `dst` and `src` in _HelperInputVoidPtr,
 * extracts each helper's `.cptr` attribute as an unsigned integer cast to
 * void*, releases the GIL, calls cyruntime.cudaMemcpyPeer(cydst_ptr, dstDevice,
 * cysrc_ptr, srcDevice, count), and returns the 1-tuple
 * (_dict_cudaError_t[err],). Error paths funnel through __pyx_L1_error, which
 * decrefs live temporaries and records a traceback. */
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_238cudaMemcpyPeer(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, int __pyx_v_dstDevice, PyObject *__pyx_v_src, int __pyx_v_srcDevice, size_t __pyx_v_count) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpyPeer", 0); /* "cuda/bindings/runtime.pyx":21265 * :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpyPeerAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cuMemcpyPeer` * """ * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_dst}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21265, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr 
*)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":21266 * """ * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21266, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21266, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":21267 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_src}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21267, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":21268 * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpyPeer(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), 
__pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21268, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21268, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":21269 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyPeer(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21270 * cdef void* cysrc_ptr = cysrc.cptr * with nogil: * err = cyruntime.cudaMemcpyPeer(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpyPeer(__pyx_v_cydst_ptr, __pyx_v_dstDevice, __pyx_v_cysrc_ptr, __pyx_v_srcDevice, __pyx_v_count); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21270, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":21269 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyPeer(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":21271 * with nogil: * err = cyruntime.cudaMemcpyPeer(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) 
*/ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21271, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21271, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21228 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyPeer(dst, int dstDevice, src, int srcDevice, size_t count): * """ Copies memory between two devices. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyPeer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21273 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2D(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind not None : cudaMemcpyKind): * """ Copies data between host and device. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_241cudaMemcpy2D(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_240cudaMemcpy2D, "cudaMemcpy2D(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind)\n\nCopies data between host and device.\n\nCopies a matrix (`height` rows of `width` bytes each) from the memory\narea pointed to by `src` to the memory area pointed to by `dst`, where\n`kind` specifies the direction of the copy, and must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. `dpitch` and `spitch` are the widths in\nmemory in bytes of the 2D arrays pointed to by `dst` and `src`,\nincluding any padding added to the end of each row. The memory areas\nmay not overlap. `width` must not exceed either `dpitch` or `spitch`.\nCalling :py:obj:`~.cudaMemcpy2D()` with `dst` and `src` pointers that\ndo not match the direction of the copy results in an undefined\nbehavior. 
:py:obj:`~.cudaMemcpy2D()` returns an error if `dpitch` or\n`spitch` exceeds the maximum allowed.\n\nParameters\n----------\ndst : Any\n    Destination memory address\ndpitch : size_t\n    Pitch of destination memory\nsrc : Any\n    Source memory address\nspitch : size_t\n    Pitch of source memory\nwidth : size_t\n    Width of matrix transfer (columns in bytes)\nheight : size_t\n    Height of matrix transfer (rows)\nkind : :py:obj:`~.cudaMemcpyKind`\n    Type of transfer\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSy""mbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_241cudaMemcpy2D = {"cudaMemcpy2D", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_241cudaMemcpy2D, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_240cudaMemcpy2D}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_241cudaMemcpy2D(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t __pyx_v_dpitch; PyObject *__pyx_v_src = 0; size_t __pyx_v_spitch; size_t __pyx_v_width; size_t __pyx_v_height; PyObject *__pyx_v_kind = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[7] = 
/* [doc] cudaMemcpy2D METH_FASTCALL|METH_KEYWORDS wrapper (Cython-generated;
 * edit the .pyx, not this file). It unpacks exactly seven arguments
 * (dst, dpitch, src, spitch, width, height, kind); the four pitch/size values
 * go through __Pyx_PyLong_As_size_t, `dst`/`src`/`kind` stay as borrowed
 * PyObject*. After unpacking, `kind` is rejected with a TypeError if it is
 * None, then the implementation __pyx_pf_..._240cudaMemcpy2D is invoked.
 * Failures jump to __pyx_L3_error (decref collected refs, add traceback,
 * return NULL). NOTE(review): `if (unlikely(__pyx_kwds_len) < 0)` parenthesizes
 * `unlikely` around the value, not the comparison, so with the
 * __builtin_expect definition of unlikely this check is always false --
 * upstream Cython codegen quirk; confirm against the Cython version in use. */
{0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy2D (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_dpitch,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_spitch,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,&__pyx_mstate_global->__pyx_n_u_kind_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21273, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21273, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21273, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21273, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21273, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21273, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21273, 
__pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21273, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy2D", 0) < (0)) __PYX_ERR(0, 21273, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 7; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy2D", 1, 7, 7, i); __PYX_ERR(0, 21273, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 7)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21273, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21273, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21273, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21273, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21273, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21273, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21273, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_dpitch = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_dpitch == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21274, __pyx_L3_error) __pyx_v_src = values[2]; __pyx_v_spitch = __Pyx_PyLong_As_size_t(values[3]); if 
(unlikely((__pyx_v_spitch == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21274, __pyx_L3_error) __pyx_v_width = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21274, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[5]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21274, __pyx_L3_error) __pyx_v_kind = values[6]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpy2D", 1, 7, 7, __pyx_nargs); __PYX_ERR(0, 21273, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2D", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 21274, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_240cudaMemcpy2D(__pyx_self, __pyx_v_dst, __pyx_v_dpitch, __pyx_v_src, __pyx_v_spitch, __pyx_v_width, __pyx_v_height, __pyx_v_kind); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_240cudaMemcpy2D(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t __pyx_v_dpitch, PyObject *__pyx_v_src, size_t __pyx_v_spitch, size_t 
/*
 * NOTE(review): Machine-generated by Cython from "cuda/bindings/runtime.pyx"
 * (the .pyx source is quoted in the interleaved comments below). Do not
 * hand-edit; fix the .pyx and regenerate.
 *
 * Interior of the cudaMemcpy2D implementation function: wraps `dst` and
 * `src` in _HelperInputVoidPtr, reads each helper's `cptr` attribute back
 * as a void*, converts `kind.value` to enum cudaMemcpyKind, then releases
 * the GIL and calls cyruntime.cudaMemcpy2D(...); finally builds the
 * 1-tuple return value (_dict_cudaError_t[err],). Error paths jump to
 * __pyx_L1_error (via __pyx_L4_error inside the nogil region, which
 * re-acquires the GIL first).
 */
__pyx_v_width, size_t __pyx_v_height, PyObject *__pyx_v_kind) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; enum cudaMemcpyKind __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy2D", 0); /* "cuda/bindings/runtime.pyx":21321 * :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned` * """ * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_dst}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21321, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } 
__pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":21322 * """ * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21322, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21322, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":21323 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_src}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21323, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":21324 * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = 
kind.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21324, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21324, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":21325 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy2D(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21325, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cykind = __pyx_t_6; /* "cuda/bindings/runtime.pyx":21326 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2D(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21327 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpy2D(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy2D(__pyx_v_cydst_ptr, __pyx_v_dpitch, __pyx_v_cysrc_ptr, __pyx_v_spitch, __pyx_v_width, __pyx_v_height, 
__pyx_v_cykind); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21327, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":21326 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2D(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":21328 * with nogil: * err = cyruntime.cudaMemcpy2D(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21328, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21328, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21328, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21328, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21328, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21273 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2D(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind not None : 
cudaMemcpyKind): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2D", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21330 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DToArray(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind not None : cudaMemcpyKind): * """ Copies data between host and device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_243cudaMemcpy2DToArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_242cudaMemcpy2DToArray, "cudaMemcpy2DToArray(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind)\n\nCopies data between host and device.\n\nCopies a matrix (`height` rows of `width` bytes each) from the memory\narea pointed to by `src` to the CUDA array `dst` starting at `hOffset`\nrows and `wOffset` bytes from the upper left corner, where `kind`\nspecifies the direction of the copy, and must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. 
`spitch` is the width in memory in bytes of\nthe 2D array pointed to by `src`, including any padding added to the\nend of each row. `wOffset` + `width` must not exceed the width of the\nCUDA array `dst`. `width` must not exceed `spitch`.\n:py:obj:`~.cudaMemcpy2DToArray()` returns an error if `spitch` exceeds\nthe maximum allowed.\n\nParameters\n----------\ndst : :py:obj:`~.cudaArray_t`\n Destination memory address\nwOffset : size_t\n Destination starting X offset (columns in bytes)\nhOffset : size_t\n Destination starting Y offset (rows)\nsrc : Any\n Source memory address\nspitch : size_t\n Pitch of source memory\nwidth : size_t\n Width of matrix transfer (columns in bytes)\nheight : size_t\n Height of matrix transfer (rows)\nkind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:""obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_243cudaMemcpy2DToArray = {"cudaMemcpy2DToArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_243cudaMemcpy2DToArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_242cudaMemcpy2DToArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_243cudaMemcpy2DToArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject 
/*
 * NOTE(review): Machine-generated by Cython; do not hand-edit — regenerate
 * from "cuda/bindings/runtime.pyx".
 *
 * Interior of the cudaMemcpy2DToArray Python wrapper: declares locals and
 * the values[8] slot array, counts positional args (FASTCALL or tuple
 * fallback), builds the keyword-name table (dst, wOffset, hOffset, src,
 * spitch, width, height, kind), unpacks arguments either via
 * __Pyx_ParseKeywords (keyword path, with missing-argument checks) or via
 * the all-positional fast path requiring exactly 8 args, then converts the
 * size_t parameters with __Pyx_PyLong_As_size_t. All failures jump to
 * __pyx_L3_error, which decrefs the collected values[] slots.
 */
*__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t __pyx_v_wOffset; size_t __pyx_v_hOffset; PyObject *__pyx_v_src = 0; size_t __pyx_v_spitch; size_t __pyx_v_width; size_t __pyx_v_height; PyObject *__pyx_v_kind = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[8] = {0,0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy2DToArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_wOffset,&__pyx_mstate_global->__pyx_n_u_hOffset,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_spitch,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,&__pyx_mstate_global->__pyx_n_u_kind_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21330, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 8: values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21330, __pyx_L3_error) CYTHON_FALLTHROUGH; case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21330, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21330, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21330, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21330, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21330, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21330, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21330, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy2DToArray", 0) < (0)) __PYX_ERR(0, 21330, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 8; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DToArray", 1, 8, 8, i); __PYX_ERR(0, 21330, __pyx_L3_error) } } } else if 
(unlikely(__pyx_nargs != 8)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21330, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21330, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21330, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21330, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21330, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21330, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21330, __pyx_L3_error) values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21330, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_wOffset = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_wOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21331, __pyx_L3_error) __pyx_v_hOffset = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_hOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21331, __pyx_L3_error) __pyx_v_src = values[3]; __pyx_v_spitch = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_spitch == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21331, __pyx_L3_error) __pyx_v_width = __Pyx_PyLong_As_size_t(values[5]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21331, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[6]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) 
__PYX_ERR(0, 21331, __pyx_L3_error) __pyx_v_kind = values[7]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DToArray", 1, 8, 8, __pyx_nargs); __PYX_ERR(0, 21330, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DToArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 21331, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_242cudaMemcpy2DToArray(__pyx_self, __pyx_v_dst, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_src, __pyx_v_spitch, __pyx_v_width, __pyx_v_height, __pyx_v_kind); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_242cudaMemcpy2DToArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t __pyx_v_wOffset, size_t __pyx_v_hOffset, PyObject *__pyx_v_src, size_t __pyx_v_spitch, size_t __pyx_v_width, size_t __pyx_v_height, PyObject *__pyx_v_kind) { cudaArray_t __pyx_v_cydst; PyObject *__pyx_v_pdst = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r 
/*
 * NOTE(review): Machine-generated by Cython; do not hand-edit — regenerate
 * from "cuda/bindings/runtime.pyx".
 *
 * Interior of the cudaMemcpy2DToArray implementation: normalizes `dst` to
 * an integer handle (None -> 0, cudaArray_t instance -> int(dst),
 * otherwise int(cudaArray_t(dst))) and reinterprets it as cudaArray_t;
 * wraps `src` in _HelperInputVoidPtr and reads its `cptr` as void*;
 * converts `kind.value` to enum cudaMemcpyKind; then releases the GIL and
 * calls cyruntime.cudaMemcpy2DToArray(...), and starts building the
 * (_dict_cudaError_t[err],) return tuple. Error paths jump to
 * __pyx_L1_error (via __pyx_L5_error inside the nogil region, which
 * re-acquires the GIL first).
 */
= NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; enum cudaMemcpyKind __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy2DToArray", 0); /* "cuda/bindings/runtime.pyx":21380 * """ * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_dst == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21381 * cdef cyruntime.cudaArray_t cydst * if dst is None: * pdst = 0 # <<<<<<<<<<<<<< * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pdst = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21380 * """ * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21382 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_dst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21383 * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) # <<<<<<<<<<<<<< * else: * pdst = int(cudaArray_t(dst)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_dst); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21383, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pdst = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":21382 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21385 * pdst = int(dst) * else: * pdst = int(cudaArray_t(dst)) # <<<<<<<<<<<<<< * cydst = 
pdst * cysrc = _HelperInputVoidPtr(src) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21385, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21385, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pdst = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21386 * else: * pdst = int(cudaArray_t(dst)) * cydst = pdst # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pdst); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21386, __pyx_L1_error) __pyx_v_cydst = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":21387 * pdst = int(cudaArray_t(dst)) * cydst = pdst * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_src}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | 
(__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21387, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":21388 * cydst = pdst * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21388, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21388, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":21389 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy2DToArray(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_4)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21389, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cykind = __pyx_t_7; /* "cuda/bindings/runtime.pyx":21390 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DToArray(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, 
cykind) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21391 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpy2DToArray(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy2DToArray(__pyx_v_cydst, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_cysrc_ptr, __pyx_v_spitch, __pyx_v_width, __pyx_v_height, __pyx_v_cykind); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21391, __pyx_L5_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":21390 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DToArray(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":21392 * with nogil: * err = cyruntime.cudaMemcpy2DToArray(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); 
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21392, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 21392, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21330 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DToArray(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind not None : cudaMemcpyKind): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DToArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pdst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21394 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DFromArray(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind not None : cudaMemcpyKind): * """ Copies data between host and device. 
*/
/*
 * NOTE(review): machine-generated by Cython from "cuda/bindings/runtime.pyx"
 * (see the embedded `runtime.pyx:21394` / `21444`-style markers below).
 * Do not hand-edit this C file; fix the .pyx source and regenerate instead.
 *
 * This section implements the Python-callable binding
 * cudaMemcpy2DFromArray(dst, dpitch, src, wOffset, hOffset, width, height, kind):
 *   - __pyx_pw_..._245cudaMemcpy2DFromArray: argument-parsing wrapper.
 *     Accepts exactly 8 positional/keyword arguments, converts the size_t
 *     arguments with __Pyx_PyLong_As_size_t, rejects kind=None with a
 *     TypeError, then delegates to the implementation below.
 *   - __pyx_pf_..._244cudaMemcpy2DFromArray: implementation. Coerces `src`
 *     to an integer pointer value (None -> 0, cudaArray_const_t -> int(src),
 *     otherwise int(cudaArray_const_t(src))), wraps `dst` in
 *     _HelperInputVoidPtr and reads its `cptr` attribute to get a void*,
 *     reads `kind.value` as an enum cudaMemcpyKind, releases the GIL around
 *     the call to cyruntime.cudaMemcpy2DFromArray, and returns a 1-tuple
 *     containing the resulting cudaError_t looked up in _dict_cudaError_t.
 */
 /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_245cudaMemcpy2DFromArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_244cudaMemcpy2DFromArray, "cudaMemcpy2DFromArray(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind: cudaMemcpyKind)\n\nCopies data between host and device.\n\nCopies a matrix (`height` rows of `width` bytes each) from the CUDA\narray `src` starting at `hOffset` rows and `wOffset` bytes from the\nupper left corner to the memory area pointed to by `dst`, where `kind`\nspecifies the direction of the copy, and must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. `dpitch` is the width in memory in bytes of\nthe 2D array pointed to by `dst`, including any padding added to the\nend of each row. `wOffset` + `width` must not exceed the width of the\nCUDA array `src`. 
`width` must not exceed `dpitch`.\n:py:obj:`~.cudaMemcpy2DFromArray()` returns an error if `dpitch`\nexceeds the maximum allowed.\n\nParameters\n----------\ndst : Any\n    Destination memory address\ndpitch : size_t\n    Pitch of destination memory\nsrc : :py:obj:`~.cudaArray_const_t`\n    Source memory address\nwOffset : size_t\n    Source starting X offset (columns in bytes)\nhOffset : size_t\n    Source starting Y offset (rows)\nwidth : size_t\n    Width of matrix transfer (columns in bytes)\nheight : size_t\n    Height of matrix transfer (rows)\nkind : :py:obj:`~.cudaMemcpyKind`\n    Type of transfer\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :""py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_245cudaMemcpy2DFromArray = {"cudaMemcpy2DFromArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_245cudaMemcpy2DFromArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_244cudaMemcpy2DFromArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_245cudaMemcpy2DFromArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t __pyx_v_dpitch; PyObject *__pyx_v_src = 0; size_t __pyx_v_wOffset; size_t __pyx_v_hOffset; size_t __pyx_v_width; size_t 
__pyx_v_height; PyObject *__pyx_v_kind = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[8] = {0,0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy2DFromArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_dpitch,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_wOffset,&__pyx_mstate_global->__pyx_n_u_hOffset,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,&__pyx_mstate_global->__pyx_n_u_kind_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21394, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 8: values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21394, __pyx_L3_error) CYTHON_FALLTHROUGH; case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21394, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21394, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21394, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21394, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21394, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21394, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21394, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy2DFromArray", 0) < (0)) __PYX_ERR(0, 21394, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 8; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DFromArray", 1, 8, 8, i); __PYX_ERR(0, 21394, __pyx_L3_error) } } } else if 
(unlikely(__pyx_nargs != 8)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21394, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21394, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21394, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21394, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21394, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21394, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21394, __pyx_L3_error) values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21394, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_dpitch = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_dpitch == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21395, __pyx_L3_error) __pyx_v_src = values[2]; __pyx_v_wOffset = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_wOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21395, __pyx_L3_error) __pyx_v_hOffset = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_hOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21395, __pyx_L3_error) __pyx_v_width = __Pyx_PyLong_As_size_t(values[5]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21395, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[6]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) 
__PYX_ERR(0, 21395, __pyx_L3_error) __pyx_v_kind = values[7]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DFromArray", 1, 8, 8, __pyx_nargs); __PYX_ERR(0, 21394, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DFromArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 21395, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_244cudaMemcpy2DFromArray(__pyx_self, __pyx_v_dst, __pyx_v_dpitch, __pyx_v_src, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_width, __pyx_v_height, __pyx_v_kind); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_244cudaMemcpy2DFromArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t __pyx_v_dpitch, PyObject *__pyx_v_src, size_t __pyx_v_wOffset, size_t __pyx_v_hOffset, size_t __pyx_v_width, size_t __pyx_v_height, PyObject *__pyx_v_kind) { cudaArray_const_t __pyx_v_cysrc; PyObject *__pyx_v_psrc = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; 
PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; enum cudaMemcpyKind __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy2DFromArray", 0); /* "cuda/bindings/runtime.pyx":21444 * """ * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ __pyx_t_1 = (__pyx_v_src == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21445 * cdef cyruntime.cudaArray_const_t cysrc * if src is None: * psrc = 0 # <<<<<<<<<<<<<< * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_psrc = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21444 * """ * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21446 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_src, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21447 * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) # <<<<<<<<<<<<<< * else: * psrc = int(cudaArray_const_t(src)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_src); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_psrc = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":21446 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21449 
* psrc = int(src) * else: * psrc = int(cudaArray_const_t(src)) # <<<<<<<<<<<<<< * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_src}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21449, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_psrc = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21450 * else: * psrc = int(cudaArray_const_t(src)) * cysrc = psrc # <<<<<<<<<<<<<< * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_psrc); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21450, __pyx_L1_error) __pyx_v_cysrc = ((cudaArray_const_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":21451 * psrc = int(cudaArray_const_t(src)) * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = 
{__pyx_t_2, __pyx_v_dst}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21451, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":21452 * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21452, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21452, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":21453 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy2DFromArray(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21453, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_4)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21453, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cykind = __pyx_t_7; /* "cuda/bindings/runtime.pyx":21454 * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # 
<<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DFromArray(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21455 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpy2DFromArray(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy2DFromArray(__pyx_v_cydst_ptr, __pyx_v_dpitch, __pyx_v_cysrc, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_width, __pyx_v_height, __pyx_v_cykind); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21455, __pyx_L5_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":21454 * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DFromArray(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":21456 * with nogil: * err = cyruntime.cudaMemcpy2DFromArray(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21456, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21456, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = 
__Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21456, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21456, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 21456, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21394 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DFromArray(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind not None : cudaMemcpyKind): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DFromArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_psrc); __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21458 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, size_t wOffsetSrc, size_t hOffsetSrc, size_t width, size_t height, kind not None : cudaMemcpyKind): * """ Copies data between host and device. 
*/
/*
 * NOTE(review): machine-generated by Cython from "cuda/bindings/runtime.pyx"
 * (see the embedded `runtime.pyx:21458` markers below). Do not hand-edit;
 * fix the .pyx source and regenerate instead.
 *
 * __pyx_pw_..._247cudaMemcpy2DArrayToArray is the argument-parsing wrapper
 * for cudaMemcpy2DArrayToArray(dst, wOffsetDst, hOffsetDst, src, wOffsetSrc,
 * hOffsetSrc, width, height, kind). It accepts exactly 9 positional/keyword
 * arguments, converts the six size_t arguments with __Pyx_PyLong_As_size_t,
 * rejects kind=None with a TypeError, and delegates to
 * __pyx_pf_..._246cudaMemcpy2DArrayToArray (defined after this wrapper).
 */
 /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_247cudaMemcpy2DArrayToArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_246cudaMemcpy2DArrayToArray, "cudaMemcpy2DArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, size_t wOffsetSrc, size_t hOffsetSrc, size_t width, size_t height, kind: cudaMemcpyKind)\n\nCopies data between host and device.\n\nCopies a matrix (`height` rows of `width` bytes each) from the CUDA\narray `src` starting at `hOffsetSrc` rows and `wOffsetSrc` bytes from\nthe upper left corner to the CUDA array `dst` starting at `hOffsetDst`\nrows and `wOffsetDst` bytes from the upper left corner, where `kind`\nspecifies the direction of the copy, and must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. `wOffsetDst` + `width` must not exceed the\nwidth of the CUDA array `dst`. 
`wOffsetSrc` + `width` must not exceed\nthe width of the CUDA array `src`.\n\nParameters\n----------\ndst : :py:obj:`~.cudaArray_t`\n    Destination memory address\nwOffsetDst : size_t\n    Destination starting X offset (columns in bytes)\nhOffsetDst : size_t\n    Destination starting Y offset (rows)\nsrc : :py:obj:`~.cudaArray_const_t`\n    Source memory address\nwOffsetSrc : size_t\n    Source starting X offset (columns in bytes)\nhOffsetSrc : size_t\n    Source starting Y offset (rows)\nwidth : size_t\n    Width of matrix transfer (columns in bytes)\nheight : size_t\n    Height of matrix transfer (rows)\nkind : :py:obj:`~.cudaMemcpyKind`\n    Type of transfer\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~"".cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2D`, :py:obj:`~.cuMemcpy2DUnaligned`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_247cudaMemcpy2DArrayToArray = {"cudaMemcpy2DArrayToArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_247cudaMemcpy2DArrayToArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_246cudaMemcpy2DArrayToArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_247cudaMemcpy2DArrayToArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t __pyx_v_wOffsetDst; size_t __pyx_v_hOffsetDst; PyObject *__pyx_v_src = 0; size_t 
__pyx_v_wOffsetSrc; size_t __pyx_v_hOffsetSrc; size_t __pyx_v_width; size_t __pyx_v_height; PyObject *__pyx_v_kind = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy2DArrayToArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_wOffsetDst,&__pyx_mstate_global->__pyx_n_u_hOffsetDst,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_wOffsetSrc,&__pyx_mstate_global->__pyx_n_u_hOffsetSrc,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,&__pyx_mstate_global->__pyx_n_u_kind_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21458, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 9: values[8] = __Pyx_ArgRef_FASTCALL(__pyx_args, 8); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[8])) __PYX_ERR(0, 21458, __pyx_L3_error) CYTHON_FALLTHROUGH; case 8: values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21458, __pyx_L3_error) CYTHON_FALLTHROUGH; case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21458, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21458, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21458, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21458, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21458, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21458, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21458, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy2DArrayToArray", 0) < (0)) __PYX_ERR(0, 21458, __pyx_L3_error) for (Py_ssize_t i = 
__pyx_nargs; i < 9; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DArrayToArray", 1, 9, 9, i); __PYX_ERR(0, 21458, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 9)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21458, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21458, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21458, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21458, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21458, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21458, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21458, __pyx_L3_error) values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21458, __pyx_L3_error) values[8] = __Pyx_ArgRef_FASTCALL(__pyx_args, 8); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[8])) __PYX_ERR(0, 21458, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_wOffsetDst = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_wOffsetDst == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21459, __pyx_L3_error) __pyx_v_hOffsetDst = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_hOffsetDst == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21459, __pyx_L3_error) __pyx_v_src = values[3]; __pyx_v_wOffsetSrc = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_wOffsetSrc == 
(size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21459, __pyx_L3_error) __pyx_v_hOffsetSrc = __Pyx_PyLong_As_size_t(values[5]); if (unlikely((__pyx_v_hOffsetSrc == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21459, __pyx_L3_error) __pyx_v_width = __Pyx_PyLong_As_size_t(values[6]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21459, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[7]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21459, __pyx_L3_error) __pyx_v_kind = values[8]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DArrayToArray", 1, 9, 9, __pyx_nargs); __PYX_ERR(0, 21458, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DArrayToArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 21459, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_246cudaMemcpy2DArrayToArray(__pyx_self, __pyx_v_dst, __pyx_v_wOffsetDst, __pyx_v_hOffsetDst, __pyx_v_src, __pyx_v_wOffsetSrc, __pyx_v_hOffsetSrc, __pyx_v_width, __pyx_v_height, __pyx_v_kind); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; 
} static PyObject *__pyx_pf_4cuda_8bindings_7runtime_246cudaMemcpy2DArrayToArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t __pyx_v_wOffsetDst, size_t __pyx_v_hOffsetDst, PyObject *__pyx_v_src, size_t __pyx_v_wOffsetSrc, size_t __pyx_v_hOffsetSrc, size_t __pyx_v_width, size_t __pyx_v_height, PyObject *__pyx_v_kind) { cudaArray_const_t __pyx_v_cysrc; PyObject *__pyx_v_psrc = NULL; cudaArray_t __pyx_v_cydst; PyObject *__pyx_v_pdst = NULL; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; enum cudaMemcpyKind __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy2DArrayToArray", 0); /* "cuda/bindings/runtime.pyx":21508 * """ * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ __pyx_t_1 = (__pyx_v_src == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21509 * cdef cyruntime.cudaArray_const_t cysrc * if src is None: * psrc = 0 # <<<<<<<<<<<<<< * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_psrc = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21508 * """ * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21510 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_src, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21511 * psrc = 
0 * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) # <<<<<<<<<<<<<< * else: * psrc = int(cudaArray_const_t(src)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_src); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_psrc = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":21510 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21513 * psrc = int(src) * else: * psrc = int(cudaArray_const_t(src)) # <<<<<<<<<<<<<< * cysrc = psrc * cdef cyruntime.cudaArray_t cydst */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_src}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 21513, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21513, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_psrc = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21514 * else: * psrc = int(cudaArray_const_t(src)) * cysrc = psrc # <<<<<<<<<<<<<< * cdef cyruntime.cudaArray_t cydst * if dst is None: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_psrc); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21514, __pyx_L1_error) __pyx_v_cysrc = ((cudaArray_const_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* 
"cuda/bindings/runtime.pyx":21516 * cysrc = psrc * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_dst == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21517 * cdef cyruntime.cudaArray_t cydst * if dst is None: * pdst = 0 # <<<<<<<<<<<<<< * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pdst = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21516 * cysrc = psrc * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":21518 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_dst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21519 * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) # <<<<<<<<<<<<<< * else: * pdst = int(cudaArray_t(dst)) */ __pyx_t_4 = __Pyx_PyNumber_Int(__pyx_v_dst); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21519, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_pdst = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":21518 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":21521 * pdst = int(dst) * else: * pdst = int(cudaArray_t(dst)) # <<<<<<<<<<<<<< * cydst = pdst * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ /*else*/ { __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_dst}; __pyx_t_4 = 
__Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21521, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_t_3 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21521, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF((PyObject *)__pyx_t_4); __pyx_t_4 = 0; __pyx_v_pdst = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":21522 * else: * pdst = int(cudaArray_t(dst)) * cydst = pdst # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pdst); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21522, __pyx_L1_error) __pyx_v_cydst = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":21523 * pdst = int(cudaArray_t(dst)) * cydst = pdst * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy2DArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, width, height, cykind) */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21523, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_3)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21523, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_cykind = __pyx_t_7; /* "cuda/bindings/runtime.pyx":21524 * cydst = pdst * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, width, height, cykind) * return 
(_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21525 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpy2DArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, width, height, cykind) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy2DArrayToArray(__pyx_v_cydst, __pyx_v_wOffsetDst, __pyx_v_hOffsetDst, __pyx_v_cysrc, __pyx_v_wOffsetSrc, __pyx_v_hOffsetSrc, __pyx_v_width, __pyx_v_height, __pyx_v_cykind); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21525, __pyx_L6_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":21524 * cydst = pdst * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, width, height, cykind) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L7; } __pyx_L6_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L7:; } } /* "cuda/bindings/runtime.pyx":21526 * with nogil: * err = cyruntime.cudaMemcpy2DArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, width, height, cykind) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 21526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 21526, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21458 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, size_t wOffsetSrc, size_t hOffsetSrc, size_t width, size_t height, kind not None : cudaMemcpyKind): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DArrayToArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_psrc); __Pyx_XDECREF(__pyx_v_pdst); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21528 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyAsync(dst, src, size_t count, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_249cudaMemcpyAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_248cudaMemcpyAsync, "cudaMemcpyAsync(dst, src, size_t count, kind: cudaMemcpyKind, stream)\n\nCopies data between host and device.\n\nCopies `count` bytes from the memory area pointed to by `src` to the\nmemory area pointed to by `dst`, where `kind` specifies the direction\nof the copy, and must be one of :py:obj:`~.cudaMemcpyHostToHost`,\n:py:obj:`~.cudaMemcpyHostToDevice`, :py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing.\n\nThe memory areas may not overlap. Calling :py:obj:`~.cudaMemcpyAsync()`\nwith `dst` and `src` pointers that do not match the direction of the\ncopy results in an undefined behavior.\n\n:py:obj:`~.cudaMemcpyAsync()` is asynchronous with respect to the host,\nso the call may return before the copy is complete. The copy can\noptionally be associated to a stream by passing a non-zero `stream`\nargument. 
If `kind` is :py:obj:`~.cudaMemcpyHostToDevice` or\n:py:obj:`~.cudaMemcpyDeviceToHost` and the `stream` is non-zero, the\ncopy may overlap with operations in other streams.\n\nThe device version of this function only handles device to device\ncopies and cannot be given local or shared pointers.\n\nParameters\n----------\ndst : Any\n Destination memory address\nsrc : Any\n Source memory address\ncount : size_t\n Size in bytes to copy\nkind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2D""ArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyAsync`, :py:obj:`~.cuMemcpyDtoHAsync`, :py:obj:`~.cuMemcpyHtoDAsync`, :py:obj:`~.cuMemcpyDtoDAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_249cudaMemcpyAsync = {"cudaMemcpyAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_249cudaMemcpyAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_248cudaMemcpyAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_249cudaMemcpyAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; PyObject *__pyx_v_src = 0; size_t __pyx_v_count; PyObject *__pyx_v_kind = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED 
Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[5] = {0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpyAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_kind_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21528, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21528, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21528, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21528, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21528, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21528, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if 
(__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpyAsync", 0) < (0)) __PYX_ERR(0, 21528, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 5; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpyAsync", 1, 5, 5, i); __PYX_ERR(0, 21528, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 5)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21528, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21528, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21528, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21528, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21528, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_src = values[1]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21529, __pyx_L3_error) __pyx_v_kind = values[3]; __pyx_v_stream = values[4]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpyAsync", 1, 5, 5, __pyx_nargs); __PYX_ERR(0, 21528, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) 
{ PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 21529, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_248cudaMemcpyAsync(__pyx_self, __pyx_v_dst, __pyx_v_src, __pyx_v_count, __pyx_v_kind, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_248cudaMemcpyAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src, size_t __pyx_v_count, PyObject *__pyx_v_kind, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaMemcpyKind __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpyAsync", 0); /* "cuda/bindings/runtime.pyx":21579 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* 
"cuda/bindings/runtime.pyx":21580 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21579 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21581 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21582 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21582, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":21581 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21584 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cydst = _HelperInputVoidPtr(dst) */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = 
((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21584, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21584, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21585 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21585, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21586 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 
21586, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21587 * cystream = pstream * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21587, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21588 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_src}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21588, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21589 * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = 
cysrc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21589, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21589, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21590 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpyAsync(cydst_ptr, cysrc_ptr, count, cykind, cystream) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21590, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21590, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cykind = __pyx_t_8; /* "cuda/bindings/runtime.pyx":21591 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyAsync(cydst_ptr, cysrc_ptr, count, cykind, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21592 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpyAsync(cydst_ptr, cysrc_ptr, count, cykind, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpyAsync(__pyx_v_cydst_ptr, __pyx_v_cysrc_ptr, __pyx_v_count, 
__pyx_v_cykind, __pyx_v_cystream); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21592, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":21591 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyAsync(cydst_ptr, cysrc_ptr, count, cykind, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":21593 * with nogil: * err = cyruntime.cudaMemcpyAsync(cydst_ptr, cysrc_ptr, count, cykind, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21593, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 21593, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21528 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyAsync(dst, src, size_t count, kind not None : cudaMemcpyKind, stream): * """ Copies data between 
host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21595 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyPeerAsync(dst, int dstDevice, src, int srcDevice, size_t count, stream): * """ Copies memory between two devices asynchronously. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_251cudaMemcpyPeerAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_250cudaMemcpyPeerAsync, "cudaMemcpyPeerAsync(dst, int dstDevice, src, int srcDevice, size_t count, stream)\n\nCopies memory between two devices asynchronously.\n\nCopies memory from one device to memory on another device. `dst` is the\nbase device pointer of the destination memory and `dstDevice` is the\ndestination device. `src` is the base device pointer of the source\nmemory and `srcDevice` is the source device. 
`count` specifies the\nnumber of bytes to copy.\n\nNote that this function is asynchronous with respect to the host and\nall work on other devices.\n\nParameters\n----------\ndst : Any\n Destination device pointer\ndstDevice : int\n Destination device\nsrc : Any\n Source device pointer\nsrcDevice : int\n Source device\ncount : size_t\n Size of memory copy in bytes\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cuMemcpyPeerAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_251cudaMemcpyPeerAsync = {"cudaMemcpyPeerAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_251cudaMemcpyPeerAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_250cudaMemcpyPeerAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_251cudaMemcpyPeerAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; int __pyx_v_dstDevice; PyObject *__pyx_v_src = 0; int __pyx_v_srcDevice; size_t __pyx_v_count; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[6] = {0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpyPeerAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 
0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_dstDevice_2,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_srcDevice_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21595, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21595, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpyPeerAsync", 0) < (0)) __PYX_ERR(0, 21595, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 6; i++) { if (unlikely(!values[i])) { 
__Pyx_RaiseArgtupleInvalid("cudaMemcpyPeerAsync", 1, 6, 6, i); __PYX_ERR(0, 21595, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 6)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21595, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21595, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21595, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21595, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21595, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21595, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_dstDevice = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_dstDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21596, __pyx_L3_error) __pyx_v_src = values[2]; __pyx_v_srcDevice = __Pyx_PyLong_As_int(values[3]); if (unlikely((__pyx_v_srcDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 21596, __pyx_L3_error) __pyx_v_count = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21596, __pyx_L3_error) __pyx_v_stream = values[5]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpyPeerAsync", 1, 6, 6, __pyx_nargs); __PYX_ERR(0, 21595, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
__Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyPeerAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_250cudaMemcpyPeerAsync(__pyx_self, __pyx_v_dst, __pyx_v_dstDevice, __pyx_v_src, __pyx_v_srcDevice, __pyx_v_count, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_250cudaMemcpyPeerAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, int __pyx_v_dstDevice, PyObject *__pyx_v_src, int __pyx_v_srcDevice, size_t __pyx_v_count, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpyPeerAsync", 0); /* "cuda/bindings/runtime.pyx":21633 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21634 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = 
int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21633 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21635 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21636 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":21635 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21638 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cydst = _HelperInputVoidPtr(dst) */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = 
__Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21638, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21638, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21639 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21639, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21640 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21640, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* 
"cuda/bindings/runtime.pyx":21641 * cystream = pstream * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21641, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21642 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * with nogil: */ __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_src}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21642, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21643 * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpyPeerAsync(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count, cystream) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), 
__pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21643, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21643, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21644 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyPeerAsync(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21645 * cdef void* cysrc_ptr = cysrc.cptr * with nogil: * err = cyruntime.cudaMemcpyPeerAsync(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpyPeerAsync(__pyx_v_cydst_ptr, __pyx_v_dstDevice, __pyx_v_cysrc_ptr, __pyx_v_srcDevice, __pyx_v_count, __pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21645, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":21644 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyPeerAsync(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":21646 * with nogil: * err = cyruntime.cudaMemcpyPeerAsync(cydst_ptr, dstDevice, cysrc_ptr, srcDevice, count, cystream) * 
return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 21646, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21595 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyPeerAsync(dst, int dstDevice, src, int srcDevice, size_t count, stream): * """ Copies memory between two devices asynchronously. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyPeerAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21648 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyBatchAsync(dsts : Optional[tuple[Any] | list[Any]], srcs : Optional[tuple[Any] | list[Any]], sizes : tuple[int] | list[int], size_t count, attrs : Optional[tuple[cudaMemcpyAttributes] | list[cudaMemcpyAttributes]], attrsIdxs : tuple[int] | list[int], size_t numAttrs, stream): * """ Performs a batch of memory copies asynchronously. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_253cudaMemcpyBatchAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_252cudaMemcpyBatchAsync, "cudaMemcpyBatchAsync(dsts: Optional[tuple[Any] | list[Any]], srcs: Optional[tuple[Any] | list[Any]], sizes: tuple[int] | list[int], size_t count, attrs: Optional[tuple[cudaMemcpyAttributes] | list[cudaMemcpyAttributes]], attrsIdxs: tuple[int] | list[int], size_t numAttrs, stream)\n\nPerforms a batch of memory copies asynchronously.\n\nPerforms a batch of memory copies. The batch as a whole executes in\nstream order but copies within a batch are not guaranteed to execute in\nany specific order. 
This API only supports pointer-to-pointer copies.\nFor copies involving CUDA arrays, please see\n:py:obj:`~.cudaMemcpy3DBatchAsync`.\n\nPerforms memory copies from source buffers specified in `srcs` to\ndestination buffers specified in `dsts`. The size of each copy is\nspecified in `sizes`. All three arrays must be of the same length as\nspecified by `count`. Since there are no ordering guarantees for copies\nwithin a batch, specifying any dependent copies within a batch will\nresult in undefined behavior.\n\nEvery copy in the batch has to be associated with a set of attributes\nspecified in the `attrs` array. Each entry in this array can apply to\nmore than one copy. This can be done by specifying in the `attrsIdxs`\narray, the index of the first copy that the corresponding entry in the\n`attrs` array applies to. Both `attrs` and `attrsIdxs` must be of the\nsame length as specified by `numAttrs`. For example, if a batch has 10\ncopies listed in dst/src/sizes, the first 6 of which have one set of\nattributes and the remaining 4 another, then `numAttrs` will be 2,\n`attrsIdxs` will be {0, 6} and `attrs` will contains the two sets of\nattributes. Note that the first entry in `attrsIdxs` must always be 0.\nAlso, each entry must be greater than the previous entry and the last\nentry should be less than `count`. Furthermore, `numAttrs` must be\nlesser than or equal to `count`.\n\nThe :py:obj:`~.cudaMemcpyAttributes.srcAccessOrder` indicates the\nsource access ordering to be observe""d for copies associated with the\nattribute. If the source access order is set to\n:py:obj:`~.cudaMemcpySrcAccessOrderStream`, then the source will be\naccessed in stream order. If the source access order is set to\n:py:obj:`~.cudaMemcpySrcAccessOrderDuringApiCall` then it indicates\nthat access to the source pointer can be out of stream order and all\naccesses must be complete before the API call returns. 
This flag is\nsuited for ephemeral sources (ex., stack variables) when it's known\nthat no prior operations in the stream can be accessing the memory and\nalso that the lifetime of the memory is limited to the scope that the\nsource variable was declared in. Specifying this flag allows the driver\nto optimize the copy and removes the need for the user to synchronize\nthe stream after the API call. If the source access order is set to\n:py:obj:`~.cudaMemcpySrcAccessOrderAny` then it indicates that access\nto the source pointer can be out of stream order and the accesses can\nhappen even after the API call returns. This flag is suited for host\npointers allocated outside CUDA (ex., via malloc) when it's known that\nno prior operations in the stream can be accessing the memory.\nSpecifying this flag allows the driver to optimize the copy on certain\nplatforms. Each memcpy operation in the batch must have a valid\n:py:obj:`~.cudaMemcpyAttributes` corresponding to it including the\nappropriate srcAccessOrder setting, otherwise the API will return\n:py:obj:`~.cudaErrorInvalidValue`.\n\nThe :py:obj:`~.cudaMemcpyAttributes.srcLocHint` and\n:py:obj:`~.cudaMemcpyAttributes.dstLocHint` allows applications to\nspecify hint locations for operands of a copy when the operand doesn't\nhave a fixed location. That is, these hints are only applicable for\nmanaged memory pointers on devices where\n:py:obj:`~.cudaDevAttrConcurrentManagedAccess` is true or system-\nallocated pageable memory on devices where\n:py:obj:`~.cudaDevAttrPageableMemoryAccess` is true. For other cases,\nthese h""ints are ignored.\n\nThe :py:obj:`~.cudaMemcpyAttributes.flags` field can be used to specify\ncertain flags for copies. 
Setting the\n:py:obj:`~.cudaMemcpyFlagPreferOverlapWithCompute` flag indicates that\nthe associated copies should preferably overlap with any compute work.\nNote that this flag is a hint and can be ignored depending on the\nplatform and other parameters of the copy.\n\nIf any error is encountered while parsing the batch, the index within\nthe batch where the error was encountered will be returned in\n`failIdx`.\n\nParameters\n----------\ndsts : list[Any]\n Array of destination pointers.\nsrcs : list[Any]\n Array of memcpy source pointers.\nsizes : list[int]\n Array of sizes for memcpy operations.\ncount : size_t\n Size of `dsts`, `srcs` and `sizes` arrays\nattrs : list[:py:obj:`~.cudaMemcpyAttributes`]\n Array of memcpy attributes.\nattrsIdxs : list[int]\n Array of indices to specify which copies each entry in the `attrs`\n array applies to. The attributes specified in attrs[k] will be\n applied to copies starting from attrsIdxs[k] through attrsIdxs[k+1]\n - 1. Also attrs[numAttrs-1] will apply to copies starting from\n attrsIdxs[numAttrs-1] through count - 1.\nnumAttrs : size_t\n Size of `attrs` and `attrsIdxs` arrays.\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n The stream to enqueue the operations in. Must not be legacy NULL\n stream.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess` :py:obj:`~.cudaErrorInvalidValue`\nfailIdx : int\n Pointer to a location to return the index of the copy where a\n failure was encountered. 
The value will be SIZE_MAX if the error\n doesn't pertain to any specific copy."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_253cudaMemcpyBatchAsync = {"cudaMemcpyBatchAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_253cudaMemcpyBatchAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_252cudaMemcpyBatchAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_253cudaMemcpyBatchAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dsts = 0; PyObject *__pyx_v_srcs = 0; PyObject *__pyx_v_sizes = 0; size_t __pyx_v_count; PyObject *__pyx_v_attrs = 0; PyObject *__pyx_v_attrsIdxs = 0; size_t __pyx_v_numAttrs; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[8] = {0,0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpyBatchAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dsts,&__pyx_mstate_global->__pyx_n_u_srcs,&__pyx_mstate_global->__pyx_n_u_sizes,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_attrs,&__pyx_mstate_global->__pyx_n_u_attrsIdxs,&__pyx_mstate_global->__pyx_n_u_numAttrs,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21648, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 8: values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21648, __pyx_L3_error) CYTHON_FALLTHROUGH; case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21648, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21648, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21648, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21648, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21648, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21648, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21648, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpyBatchAsync", 0) < (0)) __PYX_ERR(0, 21648, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 8; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpyBatchAsync", 1, 8, 8, i); __PYX_ERR(0, 21648, __pyx_L3_error) } } } else if 
(unlikely(__pyx_nargs != 8)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21648, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21648, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21648, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21648, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21648, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21648, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21648, __pyx_L3_error) values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21648, __pyx_L3_error) } __pyx_v_dsts = values[0]; __pyx_v_srcs = values[1]; __pyx_v_sizes = values[2]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21649, __pyx_L3_error) __pyx_v_attrs = values[4]; __pyx_v_attrsIdxs = values[5]; __pyx_v_numAttrs = __Pyx_PyLong_As_size_t(values[6]); if (unlikely((__pyx_v_numAttrs == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21649, __pyx_L3_error) __pyx_v_stream = values[7]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpyBatchAsync", 1, 8, 8, __pyx_nargs); __PYX_ERR(0, 21648, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyBatchAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_252cudaMemcpyBatchAsync(__pyx_self, __pyx_v_dsts, __pyx_v_srcs, __pyx_v_sizes, __pyx_v_count, __pyx_v_attrs, __pyx_v_attrsIdxs, __pyx_v_numAttrs, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_2generator87(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":21766 * pstream = int(cudaStream_t(stream)) * cystream = pstream * if not all(isinstance(_x, (int)) for _x in attrsIdxs): # <<<<<<<<<<<<<< * raise TypeError("Argument 'attrsIdxs' is not instance of type (expected tuple[int] or list[int]") * attrs = [] if attrs is None else attrs */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_87_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_87_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_87_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_87_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = 
((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_87_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 21766, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_2generator87, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[87]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaMemcpyBatchAsync_locals_gene, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 21766, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyBatchAsync.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_2generator87(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_87_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_87_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto 
__pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 21766, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 21766, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21766, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21766, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21766, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21766, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 21766, 
__pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = PyLong_Check(__pyx_cur_scope->__pyx_v__x); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_5generator88(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":21769 * raise TypeError("Argument 'attrsIdxs' is not instance of type (expected tuple[int] or list[int]") * attrs = [] if attrs is None else attrs * if not all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs): # <<<<<<<<<<<<<< * raise TypeError("Argument 'attrs' is not instance of type (expected tuple[cyruntime.cudaMemcpyAttributes,] or list[cyruntime.cudaMemcpyAttributes,]") * if not all(isinstance(_x, (int)) for _x in sizes): */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_3genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct 
__pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_88_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_88_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_88_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_88_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_88_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 21769, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_5generator88, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[88]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaMemcpyBatchAsync_locals_gene, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 21769, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyBatchAsync.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_5generator88(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject 
/*
 * NOTE(review): Cython-generated C++ for cuda/bindings/runtime.pyx -- do not
 * hand-edit; fix issues in the .pyx source and regenerate.
 *
 * Body of generator88: the inlined genexpr of
 * `all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs)` at
 * runtime.pyx:21769. resume_label 0 (first run) is the only valid entry;
 * resuming a finished generator returns NULL and lets CPython raise. Exact
 * list/tuple closures are iterated by index on the fast path; anything else
 * goes through PyObject_GetIter and the generic iternext protocol.
 */
*__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_88_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_88_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 21769, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 21769, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21769, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
/*
 * Main loop: fetch next item (__pyx_t_4), bind it to the closure variable _x,
 * then test with __Pyx_TypeCheck against the cudaMemcpyAttributes extension
 * type. The first failing item short-circuits: Py_False is returned and the
 * iterator reference dropped. Falling off the end of the iterable reaches the
 * `else` branch and returns Py_True. StopIteration from a generic iterator is
 * swallowed (loop exit); any other exception propagates via __pyx_L1_error.
 */
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21769, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21769, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21769, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21769, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 21769, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpyAttributes); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if
/*
 * generator88 exit epilogue (traceback capture, coroutine teardown,
 * RefNanny), then the proto for generator89 and the factory
 * `..._6genexpr` for the genexpr in
 * `all(isinstance(_x, (int)) for _x in sizes)` at runtime.pyx:21771 --
 * structurally identical to the _3genexpr factory above (scope struct 89,
 * code object slot 89, Py_None sentinel on allocation failure).
 */
(__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_8generator89(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":21771 * if not all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs): * raise TypeError("Argument 'attrs' is not instance of type (expected tuple[cyruntime.cudaMemcpyAttributes,] or list[cyruntime.cudaMemcpyAttributes,]") * if not all(isinstance(_x, (int)) for _x in sizes): # <<<<<<<<<<<<<< * raise TypeError("Argument 'sizes' is not instance of type (expected tuple[int] or list[int]") * srcs = [] if srcs is None else srcs */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_6genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_89_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_89_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_89_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_89_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_89_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 21771, 
/*
 * NOTE(review): Cython-generated C++ for cuda/bindings/runtime.pyx -- do not
 * hand-edit; fix issues in the .pyx source and regenerate.
 *
 * Below: tail of the `..._6genexpr` factory (stores the iterable ".0" in the
 * scope struct and returns a coroutine running generator89), then the head of
 * generator89's body: the inlined genexpr of
 * `all(isinstance(_x, (int)) for _x in sizes)` at runtime.pyx:21771. Same
 * shape as generator88 above: first-run-only resume label, list/tuple indexed
 * fast path, generic-iterator fallback.
 */
__pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_8generator89, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[89]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaMemcpyBatchAsync_locals_gene, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 21771, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyBatchAsync.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_8generator89(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_89_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_89_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if
/*
 * Iteration machinery: bounds-checked indexed access for exact lists/tuples
 * (with Limited-API / borrowed-ref variants selected by the CYTHON_* macros),
 * otherwise iternext with StopIteration swallowed as normal loop exit.
 */
(unlikely(!__pyx_sent_value)) __PYX_ERR(0, 21771, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 21771, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21771, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21771, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21771, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21771, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21771, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 21771, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); 
/*
 * Per-item check: PyLong_Check (Python `isinstance(_x, int)` lowered to an
 * exact/subtype long check). First non-int short-circuits to Py_False;
 * exhausting the iterable yields Py_True. Then the standard generator exit
 * epilogue, followed by the quoted .pyx context for cudaMemcpyBatchAsync
 * (runtime.pyx:21648), whose implementation begins after this span.
 */
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = PyLong_Check(__pyx_cur_scope->__pyx_v__x); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21648 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyBatchAsync(dsts : Optional[tuple[Any] | list[Any]], srcs : Optional[tuple[Any] | list[Any]], sizes : tuple[int] | list[int], size_t count, attrs : Optional[tuple[cudaMemcpyAttributes] | list[cudaMemcpyAttributes]], attrsIdxs : tuple[int] | list[int], size_t numAttrs, stream): * """ Performs a batch of memory copies asynchronously. 
*/ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_252cudaMemcpyBatchAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dsts, PyObject *__pyx_v_srcs, PyObject *__pyx_v_sizes, size_t __pyx_v_count, PyObject *__pyx_v_attrs, PyObject *__pyx_v_attrsIdxs, size_t __pyx_v_numAttrs, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; PyObject *__pyx_v_pylist = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper *__pyx_v_voidStarHelperdsts = 0; void **__pyx_v_cydsts_ptr; struct __pyx_obj_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper *__pyx_v_voidStarHelpersrcs = 0; void **__pyx_v_cysrcs_ptr; std::vector __pyx_v_cysizes; struct cudaMemcpyAttributes *__pyx_v_cyattrs; Py_ssize_t __pyx_v_idx; std::vector __pyx_v_cyattrsIdxs; size_t __pyx_v_failIdx; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_2generator87 = 0; PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_5generator88 = 0; PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_8generator89 = 0; PyObject *__pyx_10genexpr204__pyx_v_pydsts = NULL; PyObject *__pyx_10genexpr205__pyx_v_pysrcs = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *(*__pyx_t_9)(PyObject *); PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; std::vector __pyx_t_12; PyObject *__pyx_t_13 = NULL; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; struct cudaMemcpyAttributes *__pyx_t_16; cudaError_t __pyx_t_17; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpyBatchAsync", 0); __Pyx_INCREF(__pyx_v_dsts); __Pyx_INCREF(__pyx_v_srcs); __Pyx_INCREF(__pyx_v_attrs); /* "cuda/bindings/runtime.pyx":21759 * """ * cdef 
cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21760 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21759 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21761 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21762 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":21761 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21764 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # 
<<<<<<<<<<<<<< * cystream = pstream * if not all(isinstance(_x, (int)) for _x in attrsIdxs): */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21764, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21765 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * if not all(isinstance(_x, (int)) for _x in attrsIdxs): * raise TypeError("Argument 'attrsIdxs' is not instance of type (expected tuple[int] or list[int]") */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21765, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21766 * pstream = int(cudaStream_t(stream)) * cystream = pstream * if not all(isinstance(_x, (int)) for _x in attrsIdxs): # <<<<<<<<<<<<<< * raise TypeError("Argument 'attrsIdxs' is not instance of type (expected tuple[int] or list[int]") * attrs = [] if attrs is None else attrs */ __pyx_t_5 = __pyx_pf_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_genexpr(NULL, __pyx_v_attrsIdxs); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21766, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 21766, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = (!__pyx_t_1); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":21767 * cystream = pstream * if not all(isinstance(_x, (int)) for _x in attrsIdxs): * raise TypeError("Argument 'attrsIdxs' is not instance of type (expected tuple[int] or list[int]") # <<<<<<<<<<<<<< * attrs = [] if attrs is None else attrs * if not all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_4 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_Argument_attrsIdxs_is_not_instan}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21767, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 21767, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":21766 * pstream = int(cudaStream_t(stream)) * cystream = pstream * if not all(isinstance(_x, (int)) for _x in attrsIdxs): # <<<<<<<<<<<<<< * raise TypeError("Argument 'attrsIdxs' is not instance of type (expected tuple[int] or list[int]") * attrs = [] if attrs is None else attrs */ } /* "cuda/bindings/runtime.pyx":21768 * if not all(isinstance(_x, (int)) for _x in attrsIdxs): * raise TypeError("Argument 'attrsIdxs' is not instance of type (expected tuple[int] or list[int]") * attrs = [] if attrs is None else attrs # <<<<<<<<<<<<<< * if not 
all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs): * raise TypeError("Argument 'attrs' is not instance of type (expected tuple[cyruntime.cudaMemcpyAttributes,] or list[cyruntime.cudaMemcpyAttributes,]") */ __pyx_t_2 = (__pyx_v_attrs == Py_None); if (__pyx_t_2) { __pyx_t_4 = PyList_New(0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21768, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; } else { __Pyx_INCREF(__pyx_v_attrs); __pyx_t_3 = __pyx_v_attrs; } __Pyx_DECREF_SET(__pyx_v_attrs, __pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":21769 * raise TypeError("Argument 'attrsIdxs' is not instance of type (expected tuple[int] or list[int]") * attrs = [] if attrs is None else attrs * if not all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs): # <<<<<<<<<<<<<< * raise TypeError("Argument 'attrs' is not instance of type (expected tuple[cyruntime.cudaMemcpyAttributes,] or list[cyruntime.cudaMemcpyAttributes,]") * if not all(isinstance(_x, (int)) for _x in sizes): */ __pyx_t_3 = __pyx_pf_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_3genexpr(NULL, __pyx_v_attrs); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21769, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_Generator_GetInlinedResult(__pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21769, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 21769, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = (!__pyx_t_2); if (unlikely(__pyx_t_1)) { /* "cuda/bindings/runtime.pyx":21770 * attrs = [] if attrs is None else attrs * if not all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs): * raise TypeError("Argument 'attrs' is not instance of type (expected tuple[cyruntime.cudaMemcpyAttributes,] or list[cyruntime.cudaMemcpyAttributes,]") # <<<<<<<<<<<<<< * if not all(isinstance(_x, (int)) for _x in sizes): * raise 
TypeError("Argument 'sizes' is not instance of type (expected tuple[int] or list[int]") */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Argument_attrs_is_not_instance_o}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21770, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(0, 21770, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":21769 * raise TypeError("Argument 'attrsIdxs' is not instance of type (expected tuple[int] or list[int]") * attrs = [] if attrs is None else attrs * if not all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs): # <<<<<<<<<<<<<< * raise TypeError("Argument 'attrs' is not instance of type (expected tuple[cyruntime.cudaMemcpyAttributes,] or list[cyruntime.cudaMemcpyAttributes,]") * if not all(isinstance(_x, (int)) for _x in sizes): */ } /* "cuda/bindings/runtime.pyx":21771 * if not all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs): * raise TypeError("Argument 'attrs' is not instance of type (expected tuple[cyruntime.cudaMemcpyAttributes,] or list[cyruntime.cudaMemcpyAttributes,]") * if not all(isinstance(_x, (int)) for _x in sizes): # <<<<<<<<<<<<<< * raise TypeError("Argument 'sizes' is not instance of type (expected tuple[int] or list[int]") * srcs = [] if srcs is None else srcs */ __pyx_t_4 = __pyx_pf_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_6genexpr(NULL, __pyx_v_sizes); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21771, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_Generator_GetInlinedResult(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21771, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 21771, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_2 = (!__pyx_t_1); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":21772 * raise TypeError("Argument 'attrs' is not instance of type (expected tuple[cyruntime.cudaMemcpyAttributes,] or list[cyruntime.cudaMemcpyAttributes,]") * if not all(isinstance(_x, (int)) for _x in sizes): * raise TypeError("Argument 'sizes' is not instance of type (expected tuple[int] or list[int]") # <<<<<<<<<<<<<< * srcs = [] if srcs is None else srcs * dsts = [] if dsts is None else dsts */ __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_3 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_mstate_global->__pyx_kp_u_Argument_sizes_is_not_instance_o}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21772, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 21772, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":21771 * if not all(isinstance(_x, (cudaMemcpyAttributes,)) for _x in attrs): * raise TypeError("Argument 'attrs' is not instance of type (expected tuple[cyruntime.cudaMemcpyAttributes,] or list[cyruntime.cudaMemcpyAttributes,]") * if not all(isinstance(_x, (int)) for _x in sizes): # <<<<<<<<<<<<<< * raise TypeError("Argument 'sizes' is not instance of type (expected tuple[int] or list[int]") * srcs = [] if srcs is None else srcs */ } /* "cuda/bindings/runtime.pyx":21773 * if not all(isinstance(_x, (int)) for _x in sizes): * raise TypeError("Argument 'sizes' is not instance of type (expected tuple[int] or list[int]") 
* srcs = [] if srcs is None else srcs # <<<<<<<<<<<<<< * dsts = [] if dsts is None else dsts * pylist = [_HelperInputVoidPtr(pydsts) for pydsts in dsts] */ __pyx_t_2 = (__pyx_v_srcs == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_srcs); __pyx_t_5 = __pyx_v_srcs; } __Pyx_DECREF_SET(__pyx_v_srcs, __pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21774 * raise TypeError("Argument 'sizes' is not instance of type (expected tuple[int] or list[int]") * srcs = [] if srcs is None else srcs * dsts = [] if dsts is None else dsts # <<<<<<<<<<<<<< * pylist = [_HelperInputVoidPtr(pydsts) for pydsts in dsts] * cdef _InputVoidPtrPtrHelper voidStarHelperdsts = _InputVoidPtrPtrHelper(pylist) */ __pyx_t_2 = (__pyx_v_dsts == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_dsts); __pyx_t_5 = __pyx_v_dsts; } __Pyx_DECREF_SET(__pyx_v_dsts, __pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21775 * srcs = [] if srcs is None else srcs * dsts = [] if dsts is None else dsts * pylist = [_HelperInputVoidPtr(pydsts) for pydsts in dsts] # <<<<<<<<<<<<<< * cdef _InputVoidPtrPtrHelper voidStarHelperdsts = _InputVoidPtrPtrHelper(pylist) * cdef void** cydsts_ptr = voidStarHelperdsts.cptr */ { /* enter inner scope */ __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21775, __pyx_L11_error) __Pyx_GOTREF(__pyx_t_5); if (likely(PyList_CheckExact(__pyx_v_dsts)) || PyTuple_CheckExact(__pyx_v_dsts)) { __pyx_t_3 = __pyx_v_dsts; __Pyx_INCREF(__pyx_t_3); __pyx_t_8 = 0; __pyx_t_9 = NULL; } else { __pyx_t_8 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_dsts); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21775, __pyx_L11_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = 
(CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21775, __pyx_L11_error) } for (;;) { if (likely(!__pyx_t_9)) { if (likely(PyList_CheckExact(__pyx_t_3))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_3); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21775, __pyx_L11_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_3, __pyx_t_8); ++__pyx_t_8; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_3); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21775, __pyx_L11_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_8)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_3, __pyx_t_8); #endif ++__pyx_t_8; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21775, __pyx_L11_error) } else { __pyx_t_4 = __pyx_t_9(__pyx_t_3); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 21775, __pyx_L11_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_10genexpr204__pyx_v_pydsts, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_10 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_11 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_10genexpr204__pyx_v_pydsts}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_11, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21775, __pyx_L11_error) 
__Pyx_GOTREF((PyObject *)__pyx_t_4); } if (unlikely(__Pyx_ListComp_Append(__pyx_t_5, (PyObject*)__pyx_t_4))) __PYX_ERR(0, 21775, __pyx_L11_error) __Pyx_DECREF((PyObject *)__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_10genexpr204__pyx_v_pydsts); __pyx_10genexpr204__pyx_v_pydsts = 0; goto __pyx_L15_exit_scope; __pyx_L11_error:; __Pyx_XDECREF(__pyx_10genexpr204__pyx_v_pydsts); __pyx_10genexpr204__pyx_v_pydsts = 0; goto __pyx_L1_error; __pyx_L15_exit_scope:; } /* exit inner scope */ __pyx_v_pylist = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21776 * dsts = [] if dsts is None else dsts * pylist = [_HelperInputVoidPtr(pydsts) for pydsts in dsts] * cdef _InputVoidPtrPtrHelper voidStarHelperdsts = _InputVoidPtrPtrHelper(pylist) # <<<<<<<<<<<<<< * cdef void** cydsts_ptr = voidStarHelperdsts.cptr * pylist = [_HelperInputVoidPtr(pysrcs) for pysrcs in srcs] */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_pylist}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21776, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_voidStarHelperdsts = ((struct __pyx_obj_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21777 * pylist = [_HelperInputVoidPtr(pydsts) for pydsts in dsts] * cdef _InputVoidPtrPtrHelper voidStarHelperdsts = _InputVoidPtrPtrHelper(pylist) * cdef void** cydsts_ptr = voidStarHelperdsts.cptr # <<<<<<<<<<<<<< * pylist = [_HelperInputVoidPtr(pysrcs) for pysrcs in srcs] * 
cdef _InputVoidPtrPtrHelper voidStarHelpersrcs = _InputVoidPtrPtrHelper(pylist) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_voidStarHelperdsts), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21777, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydsts_ptr = ((void **)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21778 * cdef _InputVoidPtrPtrHelper voidStarHelperdsts = _InputVoidPtrPtrHelper(pylist) * cdef void** cydsts_ptr = voidStarHelperdsts.cptr * pylist = [_HelperInputVoidPtr(pysrcs) for pysrcs in srcs] # <<<<<<<<<<<<<< * cdef _InputVoidPtrPtrHelper voidStarHelpersrcs = _InputVoidPtrPtrHelper(pylist) * cdef void** cysrcs_ptr = voidStarHelpersrcs.cptr */ { /* enter inner scope */ __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21778, __pyx_L18_error) __Pyx_GOTREF(__pyx_t_5); if (likely(PyList_CheckExact(__pyx_v_srcs)) || PyTuple_CheckExact(__pyx_v_srcs)) { __pyx_t_4 = __pyx_v_srcs; __Pyx_INCREF(__pyx_t_4); __pyx_t_8 = 0; __pyx_t_9 = NULL; } else { __pyx_t_8 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_srcs); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21778, __pyx_L18_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21778, __pyx_L18_error) } for (;;) { if (likely(!__pyx_t_9)) { if (likely(PyList_CheckExact(__pyx_t_4))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_4); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21778, __pyx_L18_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } __pyx_t_3 = __Pyx_PyList_GetItemRef(__pyx_t_4, __pyx_t_8); ++__pyx_t_8; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_4); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21778, __pyx_L18_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_8)); #else __pyx_t_3 = __Pyx_PySequence_ITEM(__pyx_t_4, __pyx_t_8); #endif ++__pyx_t_8; } if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21778, __pyx_L18_error) } else { __pyx_t_3 = __pyx_t_9(__pyx_t_4); if (unlikely(!__pyx_t_3)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 21778, __pyx_L18_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_10genexpr205__pyx_v_pysrcs, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_11 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_10 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_11, __pyx_10genexpr205__pyx_v_pysrcs}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_10, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21778, __pyx_L18_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } if 
(unlikely(__Pyx_ListComp_Append(__pyx_t_5, (PyObject*)__pyx_t_3))) __PYX_ERR(0, 21778, __pyx_L18_error) __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_10genexpr205__pyx_v_pysrcs); __pyx_10genexpr205__pyx_v_pysrcs = 0; goto __pyx_L22_exit_scope; __pyx_L18_error:; __Pyx_XDECREF(__pyx_10genexpr205__pyx_v_pysrcs); __pyx_10genexpr205__pyx_v_pysrcs = 0; goto __pyx_L1_error; __pyx_L22_exit_scope:; } /* exit inner scope */ __Pyx_DECREF_SET(__pyx_v_pylist, ((PyObject*)__pyx_t_5)); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21779 * cdef void** cydsts_ptr = voidStarHelperdsts.cptr * pylist = [_HelperInputVoidPtr(pysrcs) for pysrcs in srcs] * cdef _InputVoidPtrPtrHelper voidStarHelpersrcs = _InputVoidPtrPtrHelper(pylist) # <<<<<<<<<<<<<< * cdef void** cysrcs_ptr = voidStarHelpersrcs.cptr * cdef vector[size_t] cysizes = sizes */ __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_pylist}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21779, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_voidStarHelpersrcs = ((struct __pyx_obj_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21780 * pylist = [_HelperInputVoidPtr(pysrcs) for pysrcs in srcs] * cdef _InputVoidPtrPtrHelper voidStarHelpersrcs = _InputVoidPtrPtrHelper(pylist) * cdef void** cysrcs_ptr = voidStarHelpersrcs.cptr # <<<<<<<<<<<<<< * cdef vector[size_t] cysizes = sizes * if count > len(dsts): raise RuntimeError("List is too 
small: " + str(len(dsts)) + " < " + str(count)) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_voidStarHelpersrcs), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21780, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cysrcs_ptr = ((void **)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21781 * cdef _InputVoidPtrPtrHelper voidStarHelpersrcs = _InputVoidPtrPtrHelper(pylist) * cdef void** cysrcs_ptr = voidStarHelpersrcs.cptr * cdef vector[size_t] cysizes = sizes # <<<<<<<<<<<<<< * if count > len(dsts): raise RuntimeError("List is too small: " + str(len(dsts)) + " < " + str(count)) * if count > len(srcs): raise RuntimeError("List is too small: " + str(len(srcs)) + " < " + str(count)) */ __pyx_t_12 = __pyx_convert_vector_from_py_size_t(__pyx_v_sizes); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21781, __pyx_L1_error) __pyx_v_cysizes = __PYX_STD_MOVE_IF_SUPPORTED(__pyx_t_12); /* "cuda/bindings/runtime.pyx":21782 * cdef void** cysrcs_ptr = voidStarHelpersrcs.cptr * cdef vector[size_t] cysizes = sizes * if count > len(dsts): raise RuntimeError("List is too small: " + str(len(dsts)) + " < " + str(count)) # <<<<<<<<<<<<<< * if count > len(srcs): raise RuntimeError("List is too small: " + str(len(srcs)) + " < " + str(count)) * if count > len(sizes): raise RuntimeError("List is too small: " + str(len(sizes)) + " < " + str(count)) */ __pyx_t_8 = PyObject_Length(__pyx_v_dsts); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21782, __pyx_L1_error) __pyx_t_2 = (__pyx_v_count > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_4 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_dsts); if 
(unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21782, __pyx_L1_error) __pyx_t_10 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyLong_FromSize_t(__pyx_v_count); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_13 = __Pyx_PyObject_Unicode(__pyx_t_10); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 21782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_13); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_10}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 21782, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":21783 * cdef 
vector[size_t] cysizes = sizes * if count > len(dsts): raise RuntimeError("List is too small: " + str(len(dsts)) + " < " + str(count)) * if count > len(srcs): raise RuntimeError("List is too small: " + str(len(srcs)) + " < " + str(count)) # <<<<<<<<<<<<<< * if count > len(sizes): raise RuntimeError("List is too small: " + str(len(sizes)) + " < " + str(count)) * cdef cyruntime.cudaMemcpyAttributes* cyattrs = NULL */ __pyx_t_8 = PyObject_Length(__pyx_v_srcs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21783, __pyx_L1_error) __pyx_t_2 = (__pyx_v_count > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_10 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_srcs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21783, __pyx_L1_error) __pyx_t_3 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_13 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 21783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_13); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_t_13 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_3, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 21783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_FromSize_t(__pyx_v_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_13, __pyx_t_11); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 21783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_10, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 21783, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":21784 * if count > len(dsts): raise RuntimeError("List is too small: " + str(len(dsts)) + " < " + str(count)) * if count > len(srcs): raise RuntimeError("List is too small: " + str(len(srcs)) + " < " + str(count)) * if count > len(sizes): raise RuntimeError("List is too small: " + str(len(sizes)) + " < " + str(count)) # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyAttributes* cyattrs = NULL * if len(attrs) > 1: */ __pyx_t_8 = PyObject_Length(__pyx_v_sizes); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21784, __pyx_L1_error) __pyx_t_2 = (__pyx_v_count > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_10 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_3 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_sizes); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21784, __pyx_L1_error) __pyx_t_4 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 21784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_4, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_FromSize_t(__pyx_v_count); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_13 = __Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 21784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_13); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_4}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 21784, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":21785 * if count > len(srcs): raise RuntimeError("List is too small: " + str(len(srcs)) + " < " + str(count)) * if count > len(sizes): raise RuntimeError("List is too small: " + str(len(sizes)) + " < " + str(count)) * cdef cyruntime.cudaMemcpyAttributes* cyattrs = NULL # <<<<<<<<<<<<<< * if len(attrs) > 1: * cyattrs = calloc(len(attrs), sizeof(cyruntime.cudaMemcpyAttributes)) */ __pyx_v_cyattrs = NULL; /* "cuda/bindings/runtime.pyx":21786 * if count > len(sizes): raise RuntimeError("List is too small: " + str(len(sizes)) + " < 
" + str(count)) * cdef cyruntime.cudaMemcpyAttributes* cyattrs = NULL * if len(attrs) > 1: # <<<<<<<<<<<<<< * cyattrs = calloc(len(attrs), sizeof(cyruntime.cudaMemcpyAttributes)) * if cyattrs is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_attrs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21786, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":21787 * cdef cyruntime.cudaMemcpyAttributes* cyattrs = NULL * if len(attrs) > 1: * cyattrs = calloc(len(attrs), sizeof(cyruntime.cudaMemcpyAttributes)) # <<<<<<<<<<<<<< * if cyattrs is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(attrs)) + 'x' + str(sizeof(cyruntime.cudaMemcpyAttributes))) */ __pyx_t_8 = PyObject_Length(__pyx_v_attrs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21787, __pyx_L1_error) __pyx_v_cyattrs = ((struct cudaMemcpyAttributes *)calloc(__pyx_t_8, (sizeof(struct cudaMemcpyAttributes)))); /* "cuda/bindings/runtime.pyx":21788 * if len(attrs) > 1: * cyattrs = calloc(len(attrs), sizeof(cyruntime.cudaMemcpyAttributes)) * if cyattrs is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(attrs)) + 'x' + str(sizeof(cyruntime.cudaMemcpyAttributes))) * for idx in range(len(attrs)): */ __pyx_t_2 = (__pyx_v_cyattrs == NULL); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":21789 * cyattrs = calloc(len(attrs), sizeof(cyruntime.cudaMemcpyAttributes)) * if cyattrs is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(attrs)) + 'x' + str(sizeof(cyruntime.cudaMemcpyAttributes))) # <<<<<<<<<<<<<< * for idx in range(len(attrs)): * string.memcpy(&cyattrs[idx], (attrs[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpyAttributes)) */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_4 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_attrs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21789, 
__pyx_L1_error) __pyx_t_10 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_13 = __Pyx_PyObject_Unicode(__pyx_t_10); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 21789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_13); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_t_13 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 21789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyLong_FromSize_t((sizeof(struct cudaMemcpyAttributes))); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_13, __pyx_t_11); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_10}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21789, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 21789, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":21788 * if len(attrs) > 1: * cyattrs = 
calloc(len(attrs), sizeof(cyruntime.cudaMemcpyAttributes)) * if cyattrs is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(attrs)) + 'x' + str(sizeof(cyruntime.cudaMemcpyAttributes))) * for idx in range(len(attrs)): */ } /* "cuda/bindings/runtime.pyx":21790 * if cyattrs is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(attrs)) + 'x' + str(sizeof(cyruntime.cudaMemcpyAttributes))) * for idx in range(len(attrs)): # <<<<<<<<<<<<<< * string.memcpy(&cyattrs[idx], (attrs[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpyAttributes)) * elif len(attrs) == 1: */ __pyx_t_8 = PyObject_Length(__pyx_v_attrs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21790, __pyx_L1_error) __pyx_t_14 = __pyx_t_8; for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) { __pyx_v_idx = __pyx_t_15; /* "cuda/bindings/runtime.pyx":21791 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(attrs)) + 'x' + str(sizeof(cyruntime.cudaMemcpyAttributes))) * for idx in range(len(attrs)): * string.memcpy(&cyattrs[idx], (attrs[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpyAttributes)) # <<<<<<<<<<<<<< * elif len(attrs) == 1: * cyattrs = (attrs[0])._pvt_ptr */ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_attrs, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21791, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); (void)(memcpy((&(__pyx_v_cyattrs[__pyx_v_idx])), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpyAttributes *)__pyx_t_5)->_pvt_ptr, (sizeof(struct cudaMemcpyAttributes)))); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } /* "cuda/bindings/runtime.pyx":21786 * if count > len(sizes): raise RuntimeError("List is too small: " + str(len(sizes)) + " < " + str(count)) * cdef cyruntime.cudaMemcpyAttributes* cyattrs = NULL * if len(attrs) > 1: # <<<<<<<<<<<<<< * cyattrs = calloc(len(attrs), sizeof(cyruntime.cudaMemcpyAttributes)) * if cyattrs is NULL: */ goto 
__pyx_L26; } /* "cuda/bindings/runtime.pyx":21792 * for idx in range(len(attrs)): * string.memcpy(&cyattrs[idx], (attrs[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpyAttributes)) * elif len(attrs) == 1: # <<<<<<<<<<<<<< * cyattrs = (attrs[0])._pvt_ptr * cdef vector[size_t] cyattrsIdxs = attrsIdxs */ __pyx_t_8 = PyObject_Length(__pyx_v_attrs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21792, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 == 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":21793 * string.memcpy(&cyattrs[idx], (attrs[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpyAttributes)) * elif len(attrs) == 1: * cyattrs = (attrs[0])._pvt_ptr # <<<<<<<<<<<<<< * cdef vector[size_t] cyattrsIdxs = attrsIdxs * if numAttrs > len(attrs): raise RuntimeError("List is too small: " + str(len(attrs)) + " < " + str(numAttrs)) */ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_attrs, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21793, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_16 = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpyAttributes *)__pyx_t_5)->_pvt_ptr; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyattrs = __pyx_t_16; /* "cuda/bindings/runtime.pyx":21792 * for idx in range(len(attrs)): * string.memcpy(&cyattrs[idx], (attrs[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpyAttributes)) * elif len(attrs) == 1: # <<<<<<<<<<<<<< * cyattrs = (attrs[0])._pvt_ptr * cdef vector[size_t] cyattrsIdxs = attrsIdxs */ } __pyx_L26:; /* "cuda/bindings/runtime.pyx":21794 * elif len(attrs) == 1: * cyattrs = (attrs[0])._pvt_ptr * cdef vector[size_t] cyattrsIdxs = attrsIdxs # <<<<<<<<<<<<<< * if numAttrs > len(attrs): raise RuntimeError("List is too small: " + str(len(attrs)) + " < " + str(numAttrs)) * if numAttrs > len(attrsIdxs): raise RuntimeError("List is too small: " + str(len(attrsIdxs)) + " < " + str(numAttrs)) */ __pyx_t_12 = __pyx_convert_vector_from_py_size_t(__pyx_v_attrsIdxs); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 21794, 
__pyx_L1_error) __pyx_v_cyattrsIdxs = __PYX_STD_MOVE_IF_SUPPORTED(__pyx_t_12); /* "cuda/bindings/runtime.pyx":21795 * cyattrs = (attrs[0])._pvt_ptr * cdef vector[size_t] cyattrsIdxs = attrsIdxs * if numAttrs > len(attrs): raise RuntimeError("List is too small: " + str(len(attrs)) + " < " + str(numAttrs)) # <<<<<<<<<<<<<< * if numAttrs > len(attrsIdxs): raise RuntimeError("List is too small: " + str(len(attrsIdxs)) + " < " + str(numAttrs)) * cdef size_t failIdx = 0 */ __pyx_t_8 = PyObject_Length(__pyx_v_attrs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21795, __pyx_L1_error) __pyx_t_2 = (__pyx_v_numAttrs > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_10 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_attrs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21795, __pyx_L1_error) __pyx_t_3 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_3, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_FromSize_t(__pyx_v_numAttrs); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_13 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 21795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = 
__Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_13); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_3}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_10, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 21795, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":21796 * cdef vector[size_t] cyattrsIdxs = attrsIdxs * if numAttrs > len(attrs): raise RuntimeError("List is too small: " + str(len(attrs)) + " < " + str(numAttrs)) * if numAttrs > len(attrsIdxs): raise RuntimeError("List is too small: " + str(len(attrsIdxs)) + " < " + str(numAttrs)) # <<<<<<<<<<<<<< * cdef size_t failIdx = 0 * with nogil: */ __pyx_t_8 = PyObject_Length(__pyx_v_attrsIdxs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21796, __pyx_L1_error) __pyx_t_2 = (__pyx_v_numAttrs > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_10 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_3 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_attrsIdxs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21796, __pyx_L1_error) __pyx_t_4 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_13 = __Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 21796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_13); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 21796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __pyx_t_13 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_4, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 21796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_13); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_FromSize_t(__pyx_v_numAttrs); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_13, __pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_10, __pyx_t_4}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 21796, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":21797 * if numAttrs > len(attrs): raise RuntimeError("List is too small: " + str(len(attrs)) + " < " + str(numAttrs)) * if numAttrs > len(attrsIdxs): raise RuntimeError("List is too small: " + str(len(attrsIdxs)) + " < " + str(numAttrs)) * cdef size_t failIdx = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpyBatchAsync(cydsts_ptr, cysrcs_ptr, cysizes.data(), count, cyattrs, cyattrsIdxs.data(), numAttrs, &failIdx, cystream) */ __pyx_v_failIdx = 0; /* "cuda/bindings/runtime.pyx":21798 * if numAttrs > 
len(attrsIdxs): raise RuntimeError("List is too small: " + str(len(attrsIdxs)) + " < " + str(numAttrs)) * cdef size_t failIdx = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyBatchAsync(cydsts_ptr, cysrcs_ptr, cysizes.data(), count, cyattrs, cyattrsIdxs.data(), numAttrs, &failIdx, cystream) * if len(attrs) > 1 and cyattrs is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21799 * cdef size_t failIdx = 0 * with nogil: * err = cyruntime.cudaMemcpyBatchAsync(cydsts_ptr, cysrcs_ptr, cysizes.data(), count, cyattrs, cyattrsIdxs.data(), numAttrs, &failIdx, cystream) # <<<<<<<<<<<<<< * if len(attrs) > 1 and cyattrs is not NULL: * free(cyattrs) */ __pyx_t_17 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpyBatchAsync(__pyx_v_cydsts_ptr, __pyx_v_cysrcs_ptr, __pyx_v_cysizes.data(), __pyx_v_count, __pyx_v_cyattrs, __pyx_v_cyattrsIdxs.data(), __pyx_v_numAttrs, (&__pyx_v_failIdx), __pyx_v_cystream); if (unlikely(__pyx_t_17 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21799, __pyx_L33_error) __pyx_v_err = __pyx_t_17; } /* "cuda/bindings/runtime.pyx":21798 * if numAttrs > len(attrsIdxs): raise RuntimeError("List is too small: " + str(len(attrsIdxs)) + " < " + str(numAttrs)) * cdef size_t failIdx = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyBatchAsync(cydsts_ptr, cysrcs_ptr, cysizes.data(), count, cyattrs, cyattrsIdxs.data(), numAttrs, &failIdx, cystream) * if len(attrs) > 1 and cyattrs is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L34; } __pyx_L33_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L34:; } } /* "cuda/bindings/runtime.pyx":21800 * with nogil: * err = cyruntime.cudaMemcpyBatchAsync(cydsts_ptr, cysrcs_ptr, cysizes.data(), count, cyattrs, cyattrsIdxs.data(), numAttrs, &failIdx, cystream) * if len(attrs) > 1 and cyattrs 
is not NULL: # <<<<<<<<<<<<<< * free(cyattrs) * if err != cyruntime.cudaSuccess: */ __pyx_t_8 = PyObject_Length(__pyx_v_attrs); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21800, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 > 1); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L36_bool_binop_done; } __pyx_t_1 = (__pyx_v_cyattrs != NULL); __pyx_t_2 = __pyx_t_1; __pyx_L36_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":21801 * err = cyruntime.cudaMemcpyBatchAsync(cydsts_ptr, cysrcs_ptr, cysizes.data(), count, cyattrs, cyattrsIdxs.data(), numAttrs, &failIdx, cystream) * if len(attrs) > 1 and cyattrs is not NULL: * free(cyattrs) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cyattrs); /* "cuda/bindings/runtime.pyx":21800 * with nogil: * err = cyruntime.cudaMemcpyBatchAsync(cydsts_ptr, cysrcs_ptr, cysizes.data(), count, cyattrs, cyattrsIdxs.data(), numAttrs, &failIdx, cystream) * if len(attrs) > 1 and cyattrs is not NULL: # <<<<<<<<<<<<<< * free(cyattrs) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":21802 * if len(attrs) > 1 and cyattrs is not NULL: * free(cyattrs) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], failIdx) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":21803 * free(cyattrs) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], failIdx) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 21803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 21803, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 21803, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21802 * if len(attrs) > 1 and cyattrs is not NULL: * free(cyattrs) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], failIdx) */ } /* "cuda/bindings/runtime.pyx":21804 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], failIdx) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_FromSize_t(__pyx_v_failIdx); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21804, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 21804, 
__pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21804, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21648 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyBatchAsync(dsts : Optional[tuple[Any] | list[Any]], srcs : Optional[tuple[Any] | list[Any]], sizes : tuple[int] | list[int], size_t count, attrs : Optional[tuple[cudaMemcpyAttributes] | list[cudaMemcpyAttributes]], attrsIdxs : tuple[int] | list[int], size_t numAttrs, stream): * """ Performs a batch of memory copies asynchronously. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_XDECREF(__pyx_t_13); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyBatchAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_pylist); __Pyx_XDECREF((PyObject *)__pyx_v_voidStarHelperdsts); __Pyx_XDECREF((PyObject *)__pyx_v_voidStarHelpersrcs); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_2generator87); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_5generator88); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_20cudaMemcpyBatchAsync_8generator89); __Pyx_XDECREF(__pyx_10genexpr204__pyx_v_pydsts); __Pyx_XDECREF(__pyx_10genexpr205__pyx_v_pysrcs); __Pyx_XDECREF(__pyx_v_dsts); __Pyx_XDECREF(__pyx_v_srcs); __Pyx_XDECREF(__pyx_v_attrs); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21806 * return (_dict_cudaError_t[err], failIdx) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy3DBatchAsync(size_t numOps, opList : Optional[tuple[cudaMemcpy3DBatchOp] | list[cudaMemcpy3DBatchOp]], unsigned long long flags, 
stream): * """ Performs a batch of 3D memory copies asynchronously. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_255cudaMemcpy3DBatchAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_254cudaMemcpy3DBatchAsync, "cudaMemcpy3DBatchAsync(size_t numOps, opList: Optional[tuple[cudaMemcpy3DBatchOp] | list[cudaMemcpy3DBatchOp]], unsigned long long flags, stream)\n\nPerforms a batch of 3D memory copies asynchronously.\n\nPerforms a batch of memory copies. The batch as a whole executes in\nstream order but copies within a batch are not guaranteed to execute in\nany specific order. Note that this means specifying any dependent\ncopies within a batch will result in undefined behavior.\n\nPerforms memory copies as specified in the `opList` array. The length\nof this array is specified in `numOps`. Each entry in this array\ndescribes a copy operation. This includes among other things, the\nsource and destination operands for the copy as specified in\n:py:obj:`~.cudaMemcpy3DBatchOp.src` and\n:py:obj:`~.cudaMemcpy3DBatchOp.dst` respectively. The source and\ndestination operands of a copy can either be a pointer or a CUDA array.\nThe width, height and depth of a copy is specified in\n:py:obj:`~.cudaMemcpy3DBatchOp.extent`. The width, height and depth of\na copy are specified in elements and must not be zero. For pointer-to-\npointer copies, the element size is considered to be 1. For pointer to\nCUDA array or vice versa copies, the element size is determined by the\nCUDA array. For CUDA array to CUDA array copies, the element size of\nthe two CUDA arrays must match.\n\nFor a given operand, if :py:obj:`~.cudaMemcpy3DOperand`::type is\nspecified as :py:obj:`~.cudaMemcpyOperandTypePointer`, then\n:py:obj:`~.cudaMemcpy3DOperand`::op::ptr will be used. 
The\n:py:obj:`~.cudaMemcpy3DOperand`::op::ptr::ptr field must contain the\npointer where the copy should begin. The\n:py:obj:`~.cudaMemcpy3DOperand`::op::ptr::rowLength field specifies the\nlength of each row in elements and must either be zero or be greater\nthan or equal to the width of the copy specified in\n:py:obj:`~.cudaMemcpy3DBatchOp`::extent::width. The\n:py:obj:`~.cudaMemcpy3DOperand`::op::ptr::layerHeight field specifies\nthe height of"" each layer and must either be zero or be greater than or\nequal to the height of the copy specified in\n:py:obj:`~.cudaMemcpy3DBatchOp`::extent::height. When either of these\nvalues is zero, that aspect of the operand is considered to be tightly\npacked according to the copy extent. For managed memory pointers on\ndevices where :py:obj:`~.cudaDevAttrConcurrentManagedAccess` is true or\nsystem-allocated pageable memory on devices where\n:py:obj:`~.cudaDevAttrPageableMemoryAccess` is true, the\n:py:obj:`~.cudaMemcpy3DOperand`::op::ptr::locHint field can be used to\nhint the location of the operand.\n\nIf an operand's type is specified as\n:py:obj:`~.cudaMemcpyOperandTypeArray`, then\n:py:obj:`~.cudaMemcpy3DOperand`::op::array will be used. The\n:py:obj:`~.cudaMemcpy3DOperand`::op::array::array field specifies the\nCUDA array and :py:obj:`~.cudaMemcpy3DOperand`::op::array::offset\nspecifies the 3D offset into that array where the copy begins.\n\nThe :py:obj:`~.cudaMemcpyAttributes.srcAccessOrder` indicates the\nsource access ordering to be observed for copies associated with the\nattribute. If the source access order is set to\n:py:obj:`~.cudaMemcpySrcAccessOrderStream`, then the source will be\naccessed in stream order. If the source access order is set to\n:py:obj:`~.cudaMemcpySrcAccessOrderDuringApiCall` then it indicates\nthat access to the source pointer can be out of stream order and all\naccesses must be complete before the API call returns. 
This flag is\nsuited for ephemeral sources (ex., stack variables) when it's known\nthat no prior operations in the stream can be accessing the memory and\nalso that the lifetime of the memory is limited to the scope that the\nsource variable was declared in. Specifying this flag allows the driver\nto optimize the copy and removes the need for the user to synchronize\nthe stream after the API call. If the source access order is set to\n:py:obj:`~.cudaMemcpySrcAccessOrderAny` then it indicates that access\nto the source point""er can be out of stream order and the accesses can\nhappen even after the API call returns. This flag is suited for host\npointers allocated outside CUDA (ex., via malloc) when it's known that\nno prior operations in the stream can be accessing the memory.\nSpecifying this flag allows the driver to optimize the copy on certain\nplatforms. Each memcopy operation in `opList` must have a valid\nsrcAccessOrder setting, otherwise this API will return\n:py:obj:`~.cudaErrorInvalidValue`.\n\nThe :py:obj:`~.cudaMemcpyAttributes.flags` field can be used to specify\ncertain flags for copies. Setting the\n:py:obj:`~.cudaMemcpyFlagPreferOverlapWithCompute` flag indicates that\nthe associated copies should preferably overlap with any compute work.\nNote that this flag is a hint and can be ignored depending on the\nplatform and other parameters of the copy.\n\nIf any error is encountered while parsing the batch, the index within\nthe batch where the error was encountered will be returned in\n`failIdx`.\n\nParameters\n----------\nnumOps : size_t\n Total number of memcpy operations.\nopList : list[:py:obj:`~.cudaMemcpy3DBatchOp`]\n Array of size `numOps` containing the actual memcpy operations.\nflags : unsigned long long\n Flags for future use, must be zero now.\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n The stream to enqueue the operations in. 
Must not be default NULL\n stream.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess` :py:obj:`~.cudaErrorInvalidValue`\nfailIdx : int\n Pointer to a location to return the index of the copy where a\n failure was encountered. The value will be SIZE_MAX if the error\n doesn't pertain to any specific copy."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_255cudaMemcpy3DBatchAsync = {"cudaMemcpy3DBatchAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_255cudaMemcpy3DBatchAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_254cudaMemcpy3DBatchAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_255cudaMemcpy3DBatchAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { size_t __pyx_v_numOps; PyObject *__pyx_v_opList = 0; unsigned PY_LONG_LONG __pyx_v_flags; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy3DBatchAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_numOps,&__pyx_mstate_global->__pyx_n_u_opList,&__pyx_mstate_global->__pyx_n_u_flags_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21806, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21806, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21806, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21806, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21806, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy3DBatchAsync", 0) < (0)) __PYX_ERR(0, 21806, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy3DBatchAsync", 1, 4, 4, i); __PYX_ERR(0, 21806, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21806, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21806, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21806, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21806, __pyx_L3_error) } __pyx_v_numOps = 
/* NOTE(review): Cython-generated C (from cuda/bindings/runtime.pyx).  Do not
 * hand-edit; regenerate with Cython instead.  Comments below are review
 * annotations only; all code tokens are unchanged. */
/* (statement continues from the previous line: `__pyx_v_numOps = ...`)
 * Convert the collected argument objects to C values.  Each numeric
 * conversion uses the (T)-1 sentinel plus PyErr_Occurred() to detect a
 * conversion failure (generated Cython idiom). */
__Pyx_PyLong_As_size_t(values[0]);
if (unlikely((__pyx_v_numOps == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21807, __pyx_L3_error)
__pyx_v_opList = values[1];
__pyx_v_flags = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(values[2]);
if (unlikely((__pyx_v_flags == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21807, __pyx_L3_error)
__pyx_v_stream = values[3];
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
/* wrong number of positional arguments: raise TypeError (exactly 4 expected) */
__Pyx_RaiseArgtupleInvalid("cudaMemcpy3DBatchAsync", 1, 4, 4, __pyx_nargs);
__PYX_ERR(0, 21806, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
/* argument-unpacking error path: drop any argument references collected so
 * far, record the traceback, and return NULL to propagate the exception */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
  Py_XDECREF(values[__pyx_temp]);
}
__Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3DBatchAsync", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* arguments unpacked successfully: forward to the generated implementation */
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_254cudaMemcpy3DBatchAsync(__pyx_self, __pyx_v_numOps, __pyx_v_opList, __pyx_v_flags, __pyx_v_stream);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
  Py_XDECREF(values[__pyx_temp]);
}
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaMemcpy3DBatchAsync_2generator90(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
/* "cuda/bindings/runtime.pyx":21923
 * cystream = pstream
 * opList = [] if opList is None else opList
 * if not all(isinstance(_x, (cudaMemcpy3DBatchOp,)) for _x in opList):             # <<<<<<<<<<<<<<
 * raise TypeError("Argument 'opList' is not instance of type (expected tuple[cyruntime.cudaMemcpy3DBatchOp,] or list[cyruntime.cudaMemcpy3DBatchOp,]")
 * if numOps > len(opList): raise RuntimeError("List is too small: " + str(len(opList)) + " < " + str(numOps))
 */
/* Factory for the `all(isinstance(_x, (cudaMemcpy3DBatchOp,)) for _x in opList)`
 * generator expression: allocates the closure scope, stores the iterable in it,
 * and returns a fresh generator object driving generator90 (body follows). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaMemcpy3DBatchAsync_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) {
  struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_90_genexpr *__pyx_cur_scope;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("genexpr", 0);
  /* allocate the closure scope object that carries the iterable argument */
  __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_90_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_90_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_90_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL);
  if (unlikely(!__pyx_cur_scope)) {
    /* allocation failed: park Py_None in the scope slot so the shared error
     * path below can DECREF it unconditionally */
    __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_90_genexpr *)Py_None);
    __Pyx_INCREF(Py_None);
    __PYX_ERR(0, 21923, __pyx_L1_error)
  } else {
    __Pyx_GOTREF((PyObject *)__pyx_cur_scope);
  }
  /* the scope takes its own strong reference to the iterable */
  __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0;
  __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0);
  __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0);
  {
    __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_22cudaMemcpy3DBatchAsync_2generator90, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[90]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaMemcpy3DBatchAsync_locals_ge, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime);
    if (unlikely(!gen)) __PYX_ERR(0, 21923, __pyx_L1_error)
    /* ownership of the scope passes to the generator */
    __Pyx_DECREF(__pyx_cur_scope);
    __Pyx_RefNannyFinishContext();
    return (PyObject *) gen;
  }
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3DBatchAsync.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_DECREF((PyObject *)__pyx_cur_scope);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static PyObject
/* Generator body for the `all(isinstance(_x, (cudaMemcpy3DBatchOp,)) for _x
 * in opList)` check, inlined by Cython as a one-shot generator: yields nothing;
 * the "return value" is Py_False at the first non-cudaMemcpy3DBatchOp element,
 * Py_True if every element passes.  (The `static PyObject` return type is on
 * the previous line.)  NOTE(review): generated code — do not hand-edit. */
*__pyx_gb_4cuda_8bindings_7runtime_22cudaMemcpy3DBatchAsync_2generator90(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */
{
  struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_90_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_90_genexpr *)__pyx_generator->closure);
  PyObject *__pyx_r = NULL;
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  PyObject *(*__pyx_t_3)(PyObject *);
  PyObject *__pyx_t_4 = NULL;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("genexpr", 0);
  /* one-shot generator: only resume_label 0 (first run) is valid */
  switch (__pyx_generator->resume_label) {
    case 0: goto __pyx_L3_first_run;
    default: /* CPython raises the right error here */
      __Pyx_RefNannyFinishContext();
      return NULL;
  }
  __pyx_L3_first_run:;
  if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 21923, __pyx_L1_error)
  if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) {
    __Pyx_RaiseUnboundLocalError(".0");
    __PYX_ERR(0, 21923, __pyx_L1_error)
  }
  /* fast path: exact list/tuple is indexed directly (__pyx_t_3 == NULL);
   * anything else goes through the generic iterator protocol */
  if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) {
    __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0;
    __Pyx_INCREF(__pyx_t_1);
    __pyx_t_2 = 0;
    __pyx_t_3 = NULL;
  } else {
    __pyx_t_2 = -1;
    __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0);
    if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21923, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1);
    if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21923, __pyx_L1_error)
  }
  for (;;) {
    if (likely(!__pyx_t_3)) {
      /* direct-indexing path (exact list or tuple) */
      if (likely(PyList_CheckExact(__pyx_t_1))) {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21923, __pyx_L1_error)
          #endif
          if (__pyx_t_2 >= __pyx_temp) break;
        }
        __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2);
        ++__pyx_t_2;
      } else {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 21923, __pyx_L1_error)
          #endif
          if (__pyx_t_2 >= __pyx_temp) break;
        }
        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2));
        #else
        __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2);
        #endif
        ++__pyx_t_2;
      }
      if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21923, __pyx_L1_error)
    } else {
      /* generic iterator path: tp_iternext; StopIteration ends the loop */
      __pyx_t_4 = __pyx_t_3(__pyx_t_1);
      if (unlikely(!__pyx_t_4)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 21923, __pyx_L1_error)
          PyErr_Clear();
        }
        break;
      }
    }
    __Pyx_GOTREF(__pyx_t_4);
    /* bind the loop variable `_x` in the closure scope */
    __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x);
    __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    __pyx_t_4 = 0;
    /* isinstance(_x, cudaMemcpy3DBatchOp): short-circuit to False on a miss */
    __pyx_t_5 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpy3DBatchOp);
    __pyx_t_6 = (!__pyx_t_5);
    if (__pyx_t_6) {
      __Pyx_XDECREF(__pyx_r);
      __Pyx_INCREF(Py_False);
      __pyx_r = Py_False;
      __Pyx_DECREF(__pyx_t_1);
      __pyx_t_1 = 0;
      goto __pyx_L0;
    }
  }
  __Pyx_DECREF(__pyx_t_1);
  __pyx_t_1 = 0;
  /*else*/ {
    /* loop exhausted without a miss: all(...) is True */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_True);
    __pyx_r = Py_True;
    goto __pyx_L0;
  }
  CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  if
(__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21806 * return (_dict_cudaError_t[err], failIdx) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy3DBatchAsync(size_t numOps, opList : Optional[tuple[cudaMemcpy3DBatchOp] | list[cudaMemcpy3DBatchOp]], unsigned long long flags, stream): * """ Performs a batch of 3D memory copies asynchronously. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_254cudaMemcpy3DBatchAsync(CYTHON_UNUSED PyObject *__pyx_self, size_t __pyx_v_numOps, PyObject *__pyx_v_opList, unsigned PY_LONG_LONG __pyx_v_flags, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct cudaMemcpy3DBatchOp *__pyx_v_cyopList; Py_ssize_t __pyx_v_idx; size_t __pyx_v_failIdx; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaMemcpy3DBatchAsync_2generator90 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaMemcpy3DBatchOp *__pyx_t_14; cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy3DBatchAsync", 0); __Pyx_INCREF(__pyx_v_opList); /* "cuda/bindings/runtime.pyx":21915 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # 
<<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21916 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":21915 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21917 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":21918 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21918, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":21917 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":21920 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * opList = [] if opList 
is None else opList */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21920, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21920, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":21921 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * opList = [] if opList is None else opList * if not all(isinstance(_x, (cudaMemcpy3DBatchOp,)) for _x in opList): */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 21921, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":21922 * pstream = int(cudaStream_t(stream)) * cystream = pstream * opList = [] if opList is None else opList # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaMemcpy3DBatchOp,)) for _x in opList): * raise TypeError("Argument 'opList' is not instance of type (expected tuple[cyruntime.cudaMemcpy3DBatchOp,] or list[cyruntime.cudaMemcpy3DBatchOp,]") */ __pyx_t_1 = (__pyx_v_opList == Py_None); if (__pyx_t_1) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21922, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __pyx_t_3; 
__pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_opList); __pyx_t_5 = __pyx_v_opList; } __Pyx_DECREF_SET(__pyx_v_opList, __pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":21923 * cystream = pstream * opList = [] if opList is None else opList * if not all(isinstance(_x, (cudaMemcpy3DBatchOp,)) for _x in opList): # <<<<<<<<<<<<<< * raise TypeError("Argument 'opList' is not instance of type (expected tuple[cyruntime.cudaMemcpy3DBatchOp,] or list[cyruntime.cudaMemcpy3DBatchOp,]") * if numOps > len(opList): raise RuntimeError("List is too small: " + str(len(opList)) + " < " + str(numOps)) */ __pyx_t_5 = __pyx_pf_4cuda_8bindings_7runtime_22cudaMemcpy3DBatchAsync_genexpr(NULL, __pyx_v_opList); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 21923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 21923, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = (!__pyx_t_1); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":21924 * opList = [] if opList is None else opList * if not all(isinstance(_x, (cudaMemcpy3DBatchOp,)) for _x in opList): * raise TypeError("Argument 'opList' is not instance of type (expected tuple[cyruntime.cudaMemcpy3DBatchOp,] or list[cyruntime.cudaMemcpy3DBatchOp,]") # <<<<<<<<<<<<<< * if numOps > len(opList): raise RuntimeError("List is too small: " + str(len(opList)) + " < " + str(numOps)) * cdef cyruntime.cudaMemcpy3DBatchOp* cyopList = NULL */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_4 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_Argument_opList_is_not_instance}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | 
(__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21924, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 21924, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":21923 * cystream = pstream * opList = [] if opList is None else opList * if not all(isinstance(_x, (cudaMemcpy3DBatchOp,)) for _x in opList): # <<<<<<<<<<<<<< * raise TypeError("Argument 'opList' is not instance of type (expected tuple[cyruntime.cudaMemcpy3DBatchOp,] or list[cyruntime.cudaMemcpy3DBatchOp,]") * if numOps > len(opList): raise RuntimeError("List is too small: " + str(len(opList)) + " < " + str(numOps)) */ } /* "cuda/bindings/runtime.pyx":21925 * if not all(isinstance(_x, (cudaMemcpy3DBatchOp,)) for _x in opList): * raise TypeError("Argument 'opList' is not instance of type (expected tuple[cyruntime.cudaMemcpy3DBatchOp,] or list[cyruntime.cudaMemcpy3DBatchOp,]") * if numOps > len(opList): raise RuntimeError("List is too small: " + str(len(opList)) + " < " + str(numOps)) # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpy3DBatchOp* cyopList = NULL * if len(opList) > 1: */ __pyx_t_8 = PyObject_Length(__pyx_v_opList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21925, __pyx_L1_error) __pyx_t_2 = (__pyx_v_numOps > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_5 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_opList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21925, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 
= __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t(__pyx_v_numOps); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_9}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21925, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 21925, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":21926 * raise TypeError("Argument 'opList' is not instance of type (expected tuple[cyruntime.cudaMemcpy3DBatchOp,] or list[cyruntime.cudaMemcpy3DBatchOp,]") * if numOps > len(opList): raise RuntimeError("List is too small: " + str(len(opList)) + " < " + str(numOps)) * cdef cyruntime.cudaMemcpy3DBatchOp* cyopList = NULL # <<<<<<<<<<<<<< * if len(opList) > 1: * cyopList = calloc(len(opList), sizeof(cyruntime.cudaMemcpy3DBatchOp)) */ 
__pyx_v_cyopList = NULL; /* "cuda/bindings/runtime.pyx":21927 * if numOps > len(opList): raise RuntimeError("List is too small: " + str(len(opList)) + " < " + str(numOps)) * cdef cyruntime.cudaMemcpy3DBatchOp* cyopList = NULL * if len(opList) > 1: # <<<<<<<<<<<<<< * cyopList = calloc(len(opList), sizeof(cyruntime.cudaMemcpy3DBatchOp)) * if cyopList is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_opList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21927, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":21928 * cdef cyruntime.cudaMemcpy3DBatchOp* cyopList = NULL * if len(opList) > 1: * cyopList = calloc(len(opList), sizeof(cyruntime.cudaMemcpy3DBatchOp)) # <<<<<<<<<<<<<< * if cyopList is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(opList)) + 'x' + str(sizeof(cyruntime.cudaMemcpy3DBatchOp))) */ __pyx_t_8 = PyObject_Length(__pyx_v_opList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21928, __pyx_L1_error) __pyx_v_cyopList = ((struct cudaMemcpy3DBatchOp *)calloc(__pyx_t_8, (sizeof(struct cudaMemcpy3DBatchOp)))); /* "cuda/bindings/runtime.pyx":21929 * if len(opList) > 1: * cyopList = calloc(len(opList), sizeof(cyruntime.cudaMemcpy3DBatchOp)) * if cyopList is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(opList)) + 'x' + str(sizeof(cyruntime.cudaMemcpy3DBatchOp))) * for idx in range(len(opList)): */ __pyx_t_2 = (__pyx_v_cyopList == NULL); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":21930 * cyopList = calloc(len(opList), sizeof(cyruntime.cudaMemcpy3DBatchOp)) * if cyopList is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(opList)) + 'x' + str(sizeof(cyruntime.cudaMemcpy3DBatchOp))) # <<<<<<<<<<<<<< * for idx in range(len(opList)): * string.memcpy(&cyopList[idx], (opList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpy3DBatchOp)) */ __pyx_t_5 = NULL; 
__Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_9 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_opList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21930, __pyx_L1_error) __pyx_t_4 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 21930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_FromSize_t((sizeof(struct cudaMemcpy3DBatchOp))); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 21930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21930, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 
0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 21930, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":21929 * if len(opList) > 1: * cyopList = calloc(len(opList), sizeof(cyruntime.cudaMemcpy3DBatchOp)) * if cyopList is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(opList)) + 'x' + str(sizeof(cyruntime.cudaMemcpy3DBatchOp))) * for idx in range(len(opList)): */ } /* "cuda/bindings/runtime.pyx":21931 * if cyopList is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(opList)) + 'x' + str(sizeof(cyruntime.cudaMemcpy3DBatchOp))) * for idx in range(len(opList)): # <<<<<<<<<<<<<< * string.memcpy(&cyopList[idx], (opList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpy3DBatchOp)) * elif len(opList) == 1: */ __pyx_t_8 = PyObject_Length(__pyx_v_opList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21931, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":21932 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(opList)) + 'x' + str(sizeof(cyruntime.cudaMemcpy3DBatchOp))) * for idx in range(len(opList)): * string.memcpy(&cyopList[idx], (opList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpy3DBatchOp)) # <<<<<<<<<<<<<< * elif len(opList) == 1: * cyopList = (opList[0])._pvt_ptr */ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_opList, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21932, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); (void)(memcpy((&(__pyx_v_cyopList[__pyx_v_idx])), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DBatchOp *)__pyx_t_3)->_pvt_ptr, (sizeof(struct cudaMemcpy3DBatchOp)))); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } /* "cuda/bindings/runtime.pyx":21927 * if numOps > len(opList): raise RuntimeError("List is too small: " + str(len(opList)) + " < " + str(numOps)) * cdef 
cyruntime.cudaMemcpy3DBatchOp* cyopList = NULL * if len(opList) > 1: # <<<<<<<<<<<<<< * cyopList = calloc(len(opList), sizeof(cyruntime.cudaMemcpy3DBatchOp)) * if cyopList is NULL: */ goto __pyx_L8; } /* "cuda/bindings/runtime.pyx":21933 * for idx in range(len(opList)): * string.memcpy(&cyopList[idx], (opList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpy3DBatchOp)) * elif len(opList) == 1: # <<<<<<<<<<<<<< * cyopList = (opList[0])._pvt_ptr * cdef size_t failIdx = 0 */ __pyx_t_8 = PyObject_Length(__pyx_v_opList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21933, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 == 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":21934 * string.memcpy(&cyopList[idx], (opList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpy3DBatchOp)) * elif len(opList) == 1: * cyopList = (opList[0])._pvt_ptr # <<<<<<<<<<<<<< * cdef size_t failIdx = 0 * with nogil: */ __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_opList, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21934, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_14 = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DBatchOp *)__pyx_t_3)->_pvt_ptr; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_cyopList = __pyx_t_14; /* "cuda/bindings/runtime.pyx":21933 * for idx in range(len(opList)): * string.memcpy(&cyopList[idx], (opList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemcpy3DBatchOp)) * elif len(opList) == 1: # <<<<<<<<<<<<<< * cyopList = (opList[0])._pvt_ptr * cdef size_t failIdx = 0 */ } __pyx_L8:; /* "cuda/bindings/runtime.pyx":21935 * elif len(opList) == 1: * cyopList = (opList[0])._pvt_ptr * cdef size_t failIdx = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy3DBatchAsync(numOps, cyopList, &failIdx, flags, cystream) */ __pyx_v_failIdx = 0; /* "cuda/bindings/runtime.pyx":21936 * cyopList = (opList[0])._pvt_ptr * cdef size_t failIdx = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy3DBatchAsync(numOps, cyopList, &failIdx, flags, 
cystream) * if len(opList) > 1 and cyopList is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":21937 * cdef size_t failIdx = 0 * with nogil: * err = cyruntime.cudaMemcpy3DBatchAsync(numOps, cyopList, &failIdx, flags, cystream) # <<<<<<<<<<<<<< * if len(opList) > 1 and cyopList is not NULL: * free(cyopList) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy3DBatchAsync(__pyx_v_numOps, __pyx_v_cyopList, (&__pyx_v_failIdx), __pyx_v_flags, __pyx_v_cystream); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 21937, __pyx_L13_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":21936 * cyopList = (opList[0])._pvt_ptr * cdef size_t failIdx = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy3DBatchAsync(numOps, cyopList, &failIdx, flags, cystream) * if len(opList) > 1 and cyopList is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L14; } __pyx_L13_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L14:; } } /* "cuda/bindings/runtime.pyx":21938 * with nogil: * err = cyruntime.cudaMemcpy3DBatchAsync(numOps, cyopList, &failIdx, flags, cystream) * if len(opList) > 1 and cyopList is not NULL: # <<<<<<<<<<<<<< * free(cyopList) * if err != cyruntime.cudaSuccess: */ __pyx_t_8 = PyObject_Length(__pyx_v_opList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 21938, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 > 1); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L16_bool_binop_done; } __pyx_t_1 = (__pyx_v_cyopList != NULL); __pyx_t_2 = __pyx_t_1; __pyx_L16_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":21939 * err = cyruntime.cudaMemcpy3DBatchAsync(numOps, cyopList, &failIdx, flags, cystream) * if len(opList) > 1 and cyopList is not NULL: * free(cyopList) # <<<<<<<<<<<<<< * if 
err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cyopList); /* "cuda/bindings/runtime.pyx":21938 * with nogil: * err = cyruntime.cudaMemcpy3DBatchAsync(numOps, cyopList, &failIdx, flags, cystream) * if len(opList) > 1 and cyopList is not NULL: # <<<<<<<<<<<<<< * free(cyopList) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":21940 * if len(opList) > 1 and cyopList is not NULL: * free(cyopList) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], failIdx) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":21941 * free(cyopList) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], failIdx) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21941, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21941, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21941, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21941, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 21941, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None) != (0)) __PYX_ERR(0, 21941, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21940 * if len(opList) > 1 and cyopList is not NULL: * free(cyopList) * if err != cyruntime.cudaSuccess: 
# <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], failIdx) */ } /* "cuda/bindings/runtime.pyx":21942 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], failIdx) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_9, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_FromSize_t(__pyx_v_failIdx); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 21942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 21942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 21942, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 21942, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21806 * return (_dict_cudaError_t[err], failIdx) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy3DBatchAsync(size_t numOps, opList : Optional[tuple[cudaMemcpy3DBatchOp] | list[cudaMemcpy3DBatchOp]], unsigned long long flags, stream): * """ Performs a batch of 3D memory copies asynchronously. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy3DBatchAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_22cudaMemcpy3DBatchAsync_2generator90); __Pyx_XDECREF(__pyx_v_opList); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":21944 * return (_dict_cudaError_t[err], failIdx) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DAsync(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_257cudaMemcpy2DAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_256cudaMemcpy2DAsync, "cudaMemcpy2DAsync(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind, stream)\n\nCopies data between host and device.\n\nCopies a matrix (`height` rows of `width` bytes each) from the memory\narea pointed to by `src` to the memory area pointed to by `dst`, where\n`kind` specifies the direction of the copy, and must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. 
However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. `dpitch` and `spitch` are the widths in\nmemory in bytes of the 2D arrays pointed to by `dst` and `src`,\nincluding any padding added to the end of each row. The memory areas\nmay not overlap. `width` must not exceed either `dpitch` or `spitch`.\n\nCalling :py:obj:`~.cudaMemcpy2DAsync()` with `dst` and `src` pointers\nthat do not match the direction of the copy results in an undefined\nbehavior. :py:obj:`~.cudaMemcpy2DAsync()` returns an error if `dpitch`\nor `spitch` is greater than the maximum allowed.\n\n:py:obj:`~.cudaMemcpy2DAsync()` is asynchronous with respect to the\nhost, so the call may return before the copy is complete. The copy can\noptionally be associated to a stream by passing a non-zero `stream`\nargument. If `kind` is :py:obj:`~.cudaMemcpyHostToDevice` or\n:py:obj:`~.cudaMemcpyDeviceToHost` and `stream` is non-zero, the copy\nmay overlap with operations in other streams.\n\nThe device version of this function only handles device to device\ncopies and cannot be given local or shared pointers.\n\nParameters\n----------\ndst : Any\n Destination memory address\ndpitch : size_t\n Pitch of destination memory\nsrc : Any\n Source memory address\nspitch : size_t\n Pitch of sou""rce memory\nwidth : size_t\n Width of matrix transfer (columns in bytes)\nheight : size_t\n Height of matrix transfer (rows)\nkind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, 
:py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2DAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_257cudaMemcpy2DAsync = {"cudaMemcpy2DAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_257cudaMemcpy2DAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_256cudaMemcpy2DAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_257cudaMemcpy2DAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t __pyx_v_dpitch; PyObject *__pyx_v_src = 0; size_t __pyx_v_spitch; size_t __pyx_v_width; size_t __pyx_v_height; PyObject *__pyx_v_kind = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[8] = {0,0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy2DAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_dpitch,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_spitch,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,&__pyx_mstate_global->__pyx_n_u_kind_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = 
(__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 21944, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 8: values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21944, __pyx_L3_error) CYTHON_FALLTHROUGH; case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21944, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21944, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21944, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21944, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21944, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21944, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21944, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy2DAsync", 0) < (0)) __PYX_ERR(0, 21944, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 8; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DAsync", 1, 8, 8, i); __PYX_ERR(0, 21944, __pyx_L3_error) } } } else 
if (unlikely(__pyx_nargs != 8)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 21944, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 21944, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 21944, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 21944, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 21944, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 21944, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 21944, __pyx_L3_error) values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 21944, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_dpitch = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_dpitch == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21945, __pyx_L3_error) __pyx_v_src = values[2]; __pyx_v_spitch = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_spitch == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21945, __pyx_L3_error) __pyx_v_width = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21945, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[5]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 21945, __pyx_L3_error) __pyx_v_kind = values[6]; __pyx_v_stream = values[7]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; 
__Pyx_RaiseArgtupleInvalid("cudaMemcpy2DAsync", 1, 8, 8, __pyx_nargs); __PYX_ERR(0, 21944, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 21945, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_256cudaMemcpy2DAsync(__pyx_self, __pyx_v_dst, __pyx_v_dpitch, __pyx_v_src, __pyx_v_spitch, __pyx_v_width, __pyx_v_height, __pyx_v_kind, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_256cudaMemcpy2DAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t __pyx_v_dpitch, PyObject *__pyx_v_src, size_t __pyx_v_spitch, size_t __pyx_v_width, size_t __pyx_v_height, PyObject *__pyx_v_kind, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = 
NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaMemcpyKind __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy2DAsync", 0); /* "cuda/bindings/runtime.pyx":22006 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22007 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":22006 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22008 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22009 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(0, 22009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":22008 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22011 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cydst = _HelperInputVoidPtr(dst) */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22011, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22011, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":22012 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22012, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22013 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cydst = 
_HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22013, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":22014 * cystream = pstream * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22014, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22015 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_src}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22015, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":22016 * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22016, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22016, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22017 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy2DAsync(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind, cystream) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22017, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22017, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cykind = 
__pyx_t_8; /* "cuda/bindings/runtime.pyx":22018 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DAsync(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22019 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpy2DAsync(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy2DAsync(__pyx_v_cydst_ptr, __pyx_v_dpitch, __pyx_v_cysrc_ptr, __pyx_v_spitch, __pyx_v_width, __pyx_v_height, __pyx_v_cykind, __pyx_v_cystream); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22019, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":22018 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DAsync(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":22020 * with nogil: * err = cyruntime.cudaMemcpy2DAsync(cydst_ptr, dpitch, cysrc_ptr, spitch, width, height, cykind, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22020, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = 
__Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22020, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22020, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22020, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 22020, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":21944 * return (_dict_cudaError_t[err], failIdx) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DAsync(dst, size_t dpitch, src, size_t spitch, size_t width, size_t height, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22022 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. 
*/ /* NOTE(review): Cython-GENERATED wrapper + impl for runtime.cudaMemcpy2DToArrayAsync (runtime.pyx:22022-22105, per embedded source comments). Do not hand-edit; regenerate from the .pyx. The pw_259 wrapper parses 9 required args (positional or keyword) into values[], rejects kind=None, and DECREFs values[] on every exit path; the pf_258 impl coerces stream/dst to integers via int(), wraps src in _HelperInputVoidPtr, releases the GIL around the cyruntime call, and returns a 1-tuple (_dict_cudaError_t[err],). */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_259cudaMemcpy2DToArrayAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_258cudaMemcpy2DToArrayAsync, "cudaMemcpy2DToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind: cudaMemcpyKind, stream)\n\nCopies data between host and device.\n\nCopies a matrix (`height` rows of `width` bytes each) from the memory\narea pointed to by `src` to the CUDA array `dst` starting at `hOffset`\nrows and `wOffset` bytes from the upper left corner, where `kind`\nspecifies the direction of the copy, and must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. `spitch` is the width in memory in bytes of\nthe 2D array pointed to by `src`, including any padding added to the\nend of each row. `wOffset` + `width` must not exceed the width of the\nCUDA array `dst`. `width` must not exceed `spitch`.\n:py:obj:`~.cudaMemcpy2DToArrayAsync()` returns an error if `spitch`\nexceeds the maximum allowed.\n\n:py:obj:`~.cudaMemcpy2DToArrayAsync()` is asynchronous with respect to\nthe host, so the call may return before the copy is complete. The copy\ncan optionally be associated to a stream by passing a non-zero `stream`\nargument. 
If `kind` is :py:obj:`~.cudaMemcpyHostToDevice` or\n:py:obj:`~.cudaMemcpyDeviceToHost` and `stream` is non-zero, the copy\nmay overlap with operations in other streams.\n\n:py:obj:`~.cudaMemcpy2DFromArrayAsync`,\n:py:obj:`~.cudaMemcpyToSymbolAsync`,\n:py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2DAsync`\n\nParameters\n----------\ndst : :py:obj:`~.cudaArray_t`\n    Destination memory address\nwOffset : size_t\n    Destination starting X offset (columns in bytes)\nhOffset : size_t\n    Destination starting Y offset (rows)\nsrc : Any\n    Source mem""ory address\nspitch : size_t\n    Pitch of source memory\nwidth : size_t\n    Width of matrix transfer (columns in bytes)\nheight : size_t\n    Height of matrix transfer (rows)\nkind : :py:obj:`~.cudaMemcpyKind`\n    Type of transfer\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n    Stream identifier\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`,"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_259cudaMemcpy2DToArrayAsync = {"cudaMemcpy2DToArrayAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_259cudaMemcpy2DToArrayAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_258cudaMemcpy2DToArrayAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_259cudaMemcpy2DToArrayAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t __pyx_v_wOffset; 
size_t __pyx_v_hOffset; PyObject *__pyx_v_src = 0; size_t __pyx_v_spitch; size_t __pyx_v_width; size_t __pyx_v_height; PyObject *__pyx_v_kind = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy2DToArrayAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_wOffset,&__pyx_mstate_global->__pyx_n_u_hOffset,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_spitch,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,&__pyx_mstate_global->__pyx_n_u_kind_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22022, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 9: values[8] = __Pyx_ArgRef_FASTCALL(__pyx_args, 8); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[8])) __PYX_ERR(0, 22022, __pyx_L3_error) CYTHON_FALLTHROUGH; case 8: values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 22022, __pyx_L3_error) CYTHON_FALLTHROUGH; case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 22022, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 22022, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22022, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22022, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22022, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22022, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22022, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy2DToArrayAsync", 0) < (0)) __PYX_ERR(0, 22022, __pyx_L3_error) for (Py_ssize_t i = 
__pyx_nargs; i < 9; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DToArrayAsync", 1, 9, 9, i); __PYX_ERR(0, 22022, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 9)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22022, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22022, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22022, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22022, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22022, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 22022, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 22022, __pyx_L3_error) values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 22022, __pyx_L3_error) values[8] = __Pyx_ArgRef_FASTCALL(__pyx_args, 8); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[8])) __PYX_ERR(0, 22022, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_wOffset = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_wOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22023, __pyx_L3_error) __pyx_v_hOffset = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_hOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22023, __pyx_L3_error) __pyx_v_src = values[3]; __pyx_v_spitch = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_spitch == (size_t)-1) && 
PyErr_Occurred())) __PYX_ERR(0, 22023, __pyx_L3_error) __pyx_v_width = __Pyx_PyLong_As_size_t(values[5]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22023, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[6]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22023, __pyx_L3_error) __pyx_v_kind = values[7]; __pyx_v_stream = values[8]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DToArrayAsync", 1, 9, 9, __pyx_nargs); __PYX_ERR(0, 22022, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DToArrayAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 22023, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_258cudaMemcpy2DToArrayAsync(__pyx_self, __pyx_v_dst, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_src, __pyx_v_spitch, __pyx_v_width, __pyx_v_height, __pyx_v_kind, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_258cudaMemcpy2DToArrayAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t 
__pyx_v_wOffset, size_t __pyx_v_hOffset, PyObject *__pyx_v_src, size_t __pyx_v_spitch, size_t __pyx_v_width, size_t __pyx_v_height, PyObject *__pyx_v_kind, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaArray_t __pyx_v_cydst; PyObject *__pyx_v_pdst = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaMemcpyKind __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy2DToArrayAsync", 0); /* "cuda/bindings/runtime.pyx":22085 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22086 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":22085 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22087 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if 
(!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22088 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22088, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":22087 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22090 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaArray_t cydst */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22090, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22090, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":22091 * else: * pstream = 
int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaArray_t cydst * if dst is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22091, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22093 * cystream = pstream * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_dst == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22094 * cdef cyruntime.cudaArray_t cydst * if dst is None: * pdst = 0 # <<<<<<<<<<<<<< * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pdst = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":22093 * cystream = pstream * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":22095 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_dst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22096 * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) # <<<<<<<<<<<<<< * else: * pdst = int(cudaArray_t(dst)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_dst); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22096, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pdst = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":22095 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":22098 * pdst = int(dst) * else: * pdst = int(cudaArray_t(dst)) 
# <<<<<<<<<<<<<< * cydst = pdst * cysrc = _HelperInputVoidPtr(src) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22098, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22098, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pdst = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":22099 * else: * pdst = int(cudaArray_t(dst)) * cydst = pdst # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pdst); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22099, __pyx_L1_error) __pyx_v_cydst = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22100 * pdst = int(cudaArray_t(dst)) * cydst = pdst * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_src}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, 
(2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22100, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":22101 * cydst = pdst * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22101, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22102 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy2DToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind, cystream) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22102, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_4)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22102, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cykind = __pyx_t_8; /* "cuda/bindings/runtime.pyx":22103 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DToArrayAsync(cydst, wOffset, hOffset, 
cysrc_ptr, spitch, width, height, cykind, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22104 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpy2DToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy2DToArrayAsync(__pyx_v_cydst, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_cysrc_ptr, __pyx_v_spitch, __pyx_v_width, __pyx_v_height, __pyx_v_cykind, __pyx_v_cystream); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22104, __pyx_L8_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":22103 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L9; } __pyx_L8_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L9:; } } /* "cuda/bindings/runtime.pyx":22105 * with nogil: * err = cyruntime.cudaMemcpy2DToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, spitch, width, height, cykind, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22105, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22105, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = 
__Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22105, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22105, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 22105, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22022 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t spitch, size_t width, size_t height, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DToArrayAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_pdst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22107 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DFromArrayAsync(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_261cudaMemcpy2DFromArrayAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_260cudaMemcpy2DFromArrayAsync, "cudaMemcpy2DFromArrayAsync(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind: cudaMemcpyKind, stream)\n\nCopies data between host and device.\n\nCopies a matrix (`height` rows of `width` bytes each) from the CUDA\narray `src` starting at `hOffset` rows and `wOffset` bytes from the\nupper left corner to the memory area pointed to by `dst`, where `kind`\nspecifies the direction of the copy, and must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. `dpitch` is the width in memory in bytes of\nthe 2D array pointed to by `dst`, including any padding added to the\nend of each row. `wOffset` + `width` must not exceed the width of the\nCUDA array `src`. `width` must not exceed `dpitch`.\n:py:obj:`~.cudaMemcpy2DFromArrayAsync()` returns an error if `dpitch`\nexceeds the maximum allowed.\n\n:py:obj:`~.cudaMemcpy2DFromArrayAsync()` is asynchronous with respect\nto the host, so the call may return before the copy is complete. The\ncopy can optionally be associated to a stream by passing a non-zero\n`stream` argument. 
If `kind` is :py:obj:`~.cudaMemcpyHostToDevice` or\n:py:obj:`~.cudaMemcpyDeviceToHost` and `stream` is non-zero, the copy\nmay overlap with operations in other streams.\n\n:py:obj:`~.cudaMemcpyToSymbolAsync`,\n:py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpy2DAsync`\n\nParameters\n----------\ndst : Any\n Destination memory address\ndpitch : size_t\n Pitch of destination memory\nsrc : :py:obj:`~.cudaArray_const_t`\n Source memory address\nwOffset : size_t\n Source starting X offset (columns in bytes)\nhOffset : size_t\n Source st""arting Y offset (rows)\nwidth : size_t\n Width of matrix transfer (columns in bytes)\nheight : size_t\n Height of matrix transfer (rows)\nkind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidPitchValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`,"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_261cudaMemcpy2DFromArrayAsync = {"cudaMemcpy2DFromArrayAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_261cudaMemcpy2DFromArrayAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_260cudaMemcpy2DFromArrayAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_261cudaMemcpy2DFromArrayAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t 
__pyx_v_dpitch; PyObject *__pyx_v_src = 0; size_t __pyx_v_wOffset; size_t __pyx_v_hOffset; size_t __pyx_v_width; size_t __pyx_v_height; PyObject *__pyx_v_kind = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[9] = {0,0,0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpy2DFromArrayAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_dpitch,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_wOffset,&__pyx_mstate_global->__pyx_n_u_hOffset,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,&__pyx_mstate_global->__pyx_n_u_kind_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22107, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 9: values[8] = __Pyx_ArgRef_FASTCALL(__pyx_args, 8); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[8])) __PYX_ERR(0, 22107, __pyx_L3_error) CYTHON_FALLTHROUGH; case 8: values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 22107, __pyx_L3_error) CYTHON_FALLTHROUGH; case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 22107, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 22107, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22107, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22107, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22107, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22107, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22107, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpy2DFromArrayAsync", 0) < (0)) __PYX_ERR(0, 22107, __pyx_L3_error) for (Py_ssize_t i = 
__pyx_nargs; i < 9; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DFromArrayAsync", 1, 9, 9, i); __PYX_ERR(0, 22107, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 9)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22107, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22107, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22107, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22107, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22107, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 22107, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 22107, __pyx_L3_error) values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 22107, __pyx_L3_error) values[8] = __Pyx_ArgRef_FASTCALL(__pyx_args, 8); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[8])) __PYX_ERR(0, 22107, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_dpitch = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_dpitch == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22108, __pyx_L3_error) __pyx_v_src = values[2]; __pyx_v_wOffset = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_wOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22108, __pyx_L3_error) __pyx_v_hOffset = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_hOffset == (size_t)-1) && 
PyErr_Occurred())) __PYX_ERR(0, 22108, __pyx_L3_error) __pyx_v_width = __Pyx_PyLong_As_size_t(values[5]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22108, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[6]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22108, __pyx_L3_error) __pyx_v_kind = values[7]; __pyx_v_stream = values[8]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpy2DFromArrayAsync", 1, 9, 9, __pyx_nargs); __PYX_ERR(0, 22107, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DFromArrayAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 22108, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_260cudaMemcpy2DFromArrayAsync(__pyx_self, __pyx_v_dst, __pyx_v_dpitch, __pyx_v_src, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_width, __pyx_v_height, __pyx_v_kind, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* NOTE(review): Cython-generated code (from cuda/bindings/runtime.pyx:22107, cudaMemcpy2DFromArrayAsync). Do not edit by hand; edit the .pyx source and regenerate. Implementation below: converts stream/src to integer handles (accepting None, typed wrappers, or anything coercible via cudaStream_t/cudaArray_const_t), wraps dst in _HelperInputVoidPtr to obtain a void*, reads kind.value as a cudaMemcpyKind, releases the GIL around the cyruntime call, and returns a 1-tuple (_dict_cudaError_t[err],). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_260cudaMemcpy2DFromArrayAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t
__pyx_v_dpitch, PyObject *__pyx_v_src, size_t __pyx_v_wOffset, size_t __pyx_v_hOffset, size_t __pyx_v_width, size_t __pyx_v_height, PyObject *__pyx_v_kind, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaArray_const_t __pyx_v_cysrc; PyObject *__pyx_v_psrc = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaMemcpyKind __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpy2DFromArrayAsync", 0); /* "cuda/bindings/runtime.pyx":22169 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22170 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":22169 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22171 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); 
if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22172 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22172, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":22171 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22174 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaArray_const_t cysrc */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22174, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":22175 * else: * pstream = 
int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaArray_const_t cysrc * if src is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22175, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22177 * cystream = pstream * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ __pyx_t_1 = (__pyx_v_src == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22178 * cdef cyruntime.cudaArray_const_t cysrc * if src is None: * psrc = 0 # <<<<<<<<<<<<<< * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_psrc = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":22177 * cystream = pstream * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":22179 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_src, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22180 * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) # <<<<<<<<<<<<<< * else: * psrc = int(cudaArray_const_t(src)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_src); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22180, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_psrc = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":22179 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ goto __pyx_L6; } /* 
"cuda/bindings/runtime.pyx":22182 * psrc = int(src) * else: * psrc = int(cudaArray_const_t(src)) # <<<<<<<<<<<<<< * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_src}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22182, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22182, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_psrc = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":22183 * else: * psrc = int(cudaArray_const_t(src)) * cysrc = psrc # <<<<<<<<<<<<<< * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_psrc); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22183, __pyx_L1_error) __pyx_v_cysrc = ((cudaArray_const_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22184 * psrc = int(cudaArray_const_t(src)) * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { 
PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_dst}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22184, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":22185 * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22185, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22185, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22186 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpy2DFromArrayAsync(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind, cystream) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22186, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_4)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22186, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cykind = __pyx_t_8; /* "cuda/bindings/runtime.pyx":22187 * cdef void* cydst_ptr = cydst.cptr * cdef 
cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DFromArrayAsync(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22188 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpy2DFromArrayAsync(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpy2DFromArrayAsync(__pyx_v_cydst_ptr, __pyx_v_dpitch, __pyx_v_cysrc, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_width, __pyx_v_height, __pyx_v_cykind, __pyx_v_cystream); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22188, __pyx_L8_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":22187 * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpy2DFromArrayAsync(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L9; } __pyx_L8_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L9:; } } /* "cuda/bindings/runtime.pyx":22189 * with nogil: * err = cyruntime.cudaMemcpy2DFromArrayAsync(cydst_ptr, dpitch, cysrc, wOffset, hOffset, width, height, cykind, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = 
__Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22189, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 22189, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22107 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpy2DFromArrayAsync(dst, size_t dpitch, src, size_t wOffset, size_t hOffset, size_t width, size_t height, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpy2DFromArrayAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_psrc); __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22191 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset(devPtr, int value, size_t count): * """ Initializes or sets device memory to a value. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_263cudaMemset(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_262cudaMemset, "cudaMemset(devPtr, int value, size_t count)\n\nInitializes or sets device memory to a value.\n\nFills the first `count` bytes of the memory area pointed to by `devPtr`\nwith the constant byte value `value`.\n\nNote that this function is asynchronous with respect to the host unless\n`devPtr` refers to pinned host memory.\n\nParameters\n----------\ndevPtr : Any\n    Pointer to device memory\nvalue : int\n    Value to set for each byte of specified memory\ncount : size_t\n    Size in bytes to set\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`,\n\nSee Also\n--------\n:py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_263cudaMemset = {"cudaMemset", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_263cudaMemset, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_262cudaMemset}; /* NOTE(review): Cython-generated wrapper (from cuda/bindings/runtime.pyx:22191). Do not edit by hand; edit the .pyx source and regenerate. Parses positional/keyword arguments for cudaMemset(devPtr, value, count) into values[], converts value and count to C int/size_t, then dispatches to the implementation function below. On any parse/conversion failure it decrefs the collected values, records a traceback, and returns NULL. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_263cudaMemset(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; int __pyx_v_value; size_t __pyx_v_count; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemset (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if 
CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_value,&__pyx_mstate_global->__pyx_n_u_count,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22191, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22191, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22191, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22191, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemset", 0) < (0)) __PYX_ERR(0, 22191, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemset", 1, 3, 3, i); __PYX_ERR(0, 22191, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22191, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22191, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if 
(!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22191, __pyx_L3_error) } __pyx_v_devPtr = values[0]; __pyx_v_value = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_value == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22192, __pyx_L3_error) __pyx_v_count = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22192, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemset", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22191, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemset", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_262cudaMemset(__pyx_self, __pyx_v_devPtr, __pyx_v_value, __pyx_v_count); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: wraps devPtr in _HelperInputVoidPtr, reads its .cptr attribute as a void*, calls cyruntime.cudaMemset with the GIL released, and returns the 1-tuple (_dict_cudaError_t[err],). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_262cudaMemset(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr, int __pyx_v_value, size_t __pyx_v_count) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemset", 0); /* "cuda/bindings/runtime.pyx":22219 * 
:py:obj:`~.cuMemsetD8`, :py:obj:`~.cuMemsetD16`, :py:obj:`~.cuMemsetD32` * """ * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_devPtr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22219, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":22220 * """ * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemset(cydevPtr_ptr, value, count) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22220, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22220, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":22221 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemset(cydevPtr_ptr, value, count) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* 
"cuda/bindings/runtime.pyx":22222 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaMemset(cydevPtr_ptr, value, count) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemset(__pyx_v_cydevPtr_ptr, __pyx_v_value, __pyx_v_count); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22222, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":22221 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemset(cydevPtr_ptr, value, count) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":22223 * with nogil: * err = cyruntime.cudaMemset(cydevPtr_ptr, value, count) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 22223, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* 
"cuda/bindings/runtime.pyx":22191 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset(devPtr, int value, size_t count): * """ Initializes or sets device memory to a value. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemset", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22225 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset2D(devPtr, size_t pitch, int value, size_t width, size_t height): * """ Initializes or sets device memory to a value. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_265cudaMemset2D(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_264cudaMemset2D, "cudaMemset2D(devPtr, size_t pitch, int value, size_t width, size_t height)\n\nInitializes or sets device memory to a value.\n\nSets to the specified value `value` a matrix (`height` rows of `width`\nbytes each) pointed to by `dstPtr`. `pitch` is the width in bytes of\nthe 2D array pointed to by `dstPtr`, including any padding added to the\nend of each row. 
This function performs fastest when the pitch is one\nthat has been passed back by :py:obj:`~.cudaMallocPitch()`.\n\nNote that this function is asynchronous with respect to the host unless\n`devPtr` refers to pinned host memory.\n\nParameters\n----------\ndevPtr : Any\n    Pointer to 2D device memory\npitch : size_t\n    Pitch in bytes of 2D device memory(Unused if `height` is 1)\nvalue : int\n    Value to set for each byte of specified memory\nwidth : size_t\n    Width of matrix set (columns in bytes)\nheight : size_t\n    Height of matrix set (rows)\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`,\n\nSee Also\n--------\n:py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_265cudaMemset2D = {"cudaMemset2D", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_265cudaMemset2D, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_264cudaMemset2D}; /* NOTE(review): Cython-generated wrapper (from cuda/bindings/runtime.pyx:22225). Do not edit by hand; edit the .pyx source and regenerate. Parses the five cudaMemset2D arguments (devPtr, pitch, value, width, height), converts the numeric ones to size_t/int, and dispatches to __pyx_pf_4cuda_8bindings_7runtime_264cudaMemset2D; on failure it decrefs collected values, records a traceback, and returns NULL. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_265cudaMemset2D(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; size_t __pyx_v_pitch; int __pyx_v_value; size_t __pyx_v_width; size_t __pyx_v_height; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[5] = {0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemset2D (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else 
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_pitch_2,&__pyx_mstate_global->__pyx_n_u_value,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22225, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22225, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22225, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22225, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22225, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22225, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemset2D", 0) < (0)) __PYX_ERR(0, 22225, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 5; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemset2D", 1, 5, 5, i); __PYX_ERR(0, 22225, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 5)) { goto __pyx_L5_argtuple_error; } else { 
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22225, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22225, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22225, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22225, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22225, __pyx_L3_error) } __pyx_v_devPtr = values[0]; __pyx_v_pitch = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_pitch == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22226, __pyx_L3_error) __pyx_v_value = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_value == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22226, __pyx_L3_error) __pyx_v_width = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22226, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22226, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemset2D", 1, 5, 5, __pyx_nargs); __PYX_ERR(0, 22225, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemset2D", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_264cudaMemset2D(__pyx_self, __pyx_v_devPtr, __pyx_v_pitch, 
__pyx_v_value, __pyx_v_width, __pyx_v_height); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_264cudaMemset2D(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr, size_t __pyx_v_pitch, int __pyx_v_value, size_t __pyx_v_width, size_t __pyx_v_height) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemset2D", 0); /* "cuda/bindings/runtime.pyx":22260 * :py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cuMemsetD2D8`, :py:obj:`~.cuMemsetD2D16`, :py:obj:`~.cuMemsetD2D32` * """ * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_devPtr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22260, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } 
__pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":22261 * """ * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemset2D(cydevPtr_ptr, pitch, value, width, height) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22261, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22261, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":22262 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemset2D(cydevPtr_ptr, pitch, value, width, height) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22263 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaMemset2D(cydevPtr_ptr, pitch, value, width, height) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemset2D(__pyx_v_cydevPtr_ptr, __pyx_v_pitch, __pyx_v_value, __pyx_v_width, __pyx_v_height); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22263, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":22262 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemset2D(cydevPtr_ptr, pitch, value, width, height) * return (_dict_cudaError_t[err],) 
*/ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":22264 * with nogil: * err = cyruntime.cudaMemset2D(cydevPtr_ptr, pitch, value, width, height) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 22264, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22225 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset2D(devPtr, size_t pitch, int value, size_t width, size_t height): * """ Initializes or sets device memory to a value. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemset2D", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22266 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset3D(pitchedDevPtr not None : cudaPitchedPtr, int value, extent not None : cudaExtent): * """ Initializes or sets device memory to a value. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_267cudaMemset3D(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_266cudaMemset3D, "cudaMemset3D(cudaPitchedPtr pitchedDevPtr: cudaPitchedPtr, int value, cudaExtent extent: cudaExtent)\n\nInitializes or sets device memory to a value.\n\nInitializes each element of a 3D array to the specified value `value`.\nThe object to initialize is defined by `pitchedDevPtr`. The `pitch`\nfield of `pitchedDevPtr` is the width in memory in bytes of the 3D\narray pointed to by `pitchedDevPtr`, including any padding added to the\nend of each row. The `xsize` field specifies the logical width of each\nrow in bytes, while the `ysize` field specifies the height of each 2D\nslice in rows. The `pitch` field of `pitchedDevPtr` is ignored when\n`height` and `depth` are both equal to 1.\n\nThe extents of the initialized region are specified as a `width` in\nbytes, a `height` in rows, and a `depth` in slices.\n\nExtents with `width` greater than or equal to the `xsize` of\n`pitchedDevPtr` may perform significantly faster than extents narrower\nthan the `xsize`. 
Secondarily, extents with `height` equal to the\n`ysize` of `pitchedDevPtr` will perform faster than when the `height`\nis shorter than the `ysize`.\n\nThis function performs fastest when the `pitchedDevPtr` has been\nallocated by :py:obj:`~.cudaMalloc3D()`.\n\nNote that this function is asynchronous with respect to the host unless\n`pitchedDevPtr` refers to pinned host memory.\n\nParameters\n----------\npitchedDevPtr : :py:obj:`~.cudaPitchedPtr`\n Pointer to pitched device memory\nvalue : int\n Value to set for each byte of specified memory\nextent : :py:obj:`~.cudaExtent`\n Size parameters for where to set device memory (`width` field in\n bytes)\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`,\n\nSee Also\n--------\n:py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.make_cudaPitchedPtr`, :py:obj:`~.make_cudaExtent`"); /* NOTE(review): Cython-generated binding for cudaMemset3D -- do not hand-edit; regenerate from runtime.pyx. Method-table entry plus METH_FASTCALL wrapper: parses exactly 3 args (pitchedDevPtr, value, extent), converts value to int, and after unpacking runs __Pyx_ArgTypeTest to enforce the cudaPitchedPtr/cudaExtent extension types ("not None" in the .pyx signature) before dispatching. */ static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_267cudaMemset3D = {"cudaMemset3D", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_267cudaMemset3D, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_266cudaMemset3D}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_267cudaMemset3D(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaPitchedPtr *__pyx_v_pitchedDevPtr = 0; int __pyx_v_value; struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemset3D (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pitchedDevPtr,&__pyx_mstate_global->__pyx_n_u_value,&__pyx_mstate_global->__pyx_n_u_extent_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22266, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22266, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22266, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22266, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemset3D", 0) < (0)) __PYX_ERR(0, 22266, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemset3D", 1, 3, 3, i); __PYX_ERR(0, 22266, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22266, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && 
unlikely(!values[1])) __PYX_ERR(0, 22266, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22266, __pyx_L3_error) } __pyx_v_pitchedDevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaPitchedPtr *)values[0]); __pyx_v_value = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_value == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22267, __pyx_L3_error) __pyx_v_extent = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *)values[2]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemset3D", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 22266, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemset3D", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pitchedDevPtr), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaPitchedPtr, 0, "pitchedDevPtr", 0))) __PYX_ERR(0, 22267, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_extent), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExtent, 0, "extent", 0))) __PYX_ERR(0, 22267, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_266cudaMemset3D(__pyx_self, __pyx_v_pitchedDevPtr, __pyx_v_value, __pyx_v_extent); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
__pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* NOTE(review): cudaMemset3D implementation body (generated). Passes the wrapped structs' underlying C values (pitchedDevPtr._pvt_ptr[0], extent._pvt_ptr[0]) to cyruntime.cudaMemset3D with the GIL released, then returns the 1-tuple (_dict_cudaError_t[err],). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_266cudaMemset3D(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaPitchedPtr *__pyx_v_pitchedDevPtr, int __pyx_v_value, struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent) { cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemset3D", 0); /* "cuda/bindings/runtime.pyx":22313 * :py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.make_cudaPitchedPtr`, :py:obj:`~.make_cudaExtent` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemset3D(pitchedDevPtr._pvt_ptr[0], value, extent._pvt_ptr[0]) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22314 * """ * with nogil: * err = cyruntime.cudaMemset3D(pitchedDevPtr._pvt_ptr[0], value, extent._pvt_ptr[0]) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemset3D((__pyx_v_pitchedDevPtr->_pvt_ptr[0]), __pyx_v_value, (__pyx_v_extent->_pvt_ptr[0])); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22314, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":22313 * :py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.make_cudaPitchedPtr`, :py:obj:`~.make_cudaExtent` * """ * with nogil: # <<<<<<<<<<<<<< * 
err = cyruntime.cudaMemset3D(pitchedDevPtr._pvt_ptr[0], value, extent._pvt_ptr[0]) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":22315 * with nogil: * err = cyruntime.cudaMemset3D(pitchedDevPtr._pvt_ptr[0], value, extent._pvt_ptr[0]) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22315, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 22315, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22266 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset3D(pitchedDevPtr not None : cudaPitchedPtr, int value, extent not None : cudaExtent): * """ Initializes or sets device memory to a value. 
If `stream` is non-zero, the operation may overlap with\noperations in other streams.\n\nThe device version of this function only handles device to device\ncopies and cannot be given local or shared pointers.\n\nParameters\n----------\ndevPtr : Any\n Pointer to device memory\nvalue : int\n Value to set for each byte of specified memory\ncount : size_t\n Size in bytes to set\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`,\n\nSee Also\n--------\n:py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cuMemsetD8Async`, :py:obj:`~.cuMemsetD16Async`, :py:obj:`~.cuMemsetD32Async`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_269cudaMemsetAsync = {"cudaMemsetAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_269cudaMemsetAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_268cudaMemsetAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_269cudaMemsetAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; int __pyx_v_value; size_t __pyx_v_count; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemsetAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif 
#endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_value,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22317, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22317, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22317, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22317, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22317, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemsetAsync", 0) < (0)) __PYX_ERR(0, 22317, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemsetAsync", 1, 4, 4, i); __PYX_ERR(0, 22317, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22317, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22317, __pyx_L3_error) values[2] = 
__Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22317, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22317, __pyx_L3_error) } __pyx_v_devPtr = values[0]; __pyx_v_value = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_value == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22318, __pyx_L3_error) __pyx_v_count = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22318, __pyx_L3_error) __pyx_v_stream = values[3]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemsetAsync", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 22317, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemsetAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_268cudaMemsetAsync(__pyx_self, __pyx_v_devPtr, __pyx_v_value, __pyx_v_count, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_268cudaMemsetAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr, int __pyx_v_value, size_t __pyx_v_count, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int 
__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemsetAsync", 0); /* "cuda/bindings/runtime.pyx":22354 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22355 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":22354 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22356 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22357 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22357, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":22356 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22359 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22359, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":22360 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22360, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22361 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef 
void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_devPtr}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22361, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":22362 * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemsetAsync(cydevPtr_ptr, value, count, cystream) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22362, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22362, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22363 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemsetAsync(cydevPtr_ptr, value, count, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22364 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = 
cyruntime.cudaMemsetAsync(cydevPtr_ptr, value, count, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemsetAsync(__pyx_v_cydevPtr_ptr, __pyx_v_value, __pyx_v_count, __pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22364, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":22363 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemsetAsync(cydevPtr_ptr, value, count, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":22365 * with nogil: * err = cyruntime.cudaMemsetAsync(cydevPtr_ptr, value, count, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22365, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22365, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22365, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22365, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 22365, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22317 * 
* NOTE(review): Cython-generated code - do not hand-edit; regenerate from cuda/bindings/runtime.pyx. The exit/cleanup tail of cudaMemsetAsync ends below, followed by the prototype, docstring, method table entry, argument-parsing wrapper and implementation of cudaMemset2DAsync.
return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemsetAsync(devPtr, int value, size_t count, stream): * """ Initializes or sets device memory to a value. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemsetAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22367 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset2DAsync(devPtr, size_t pitch, int value, size_t width, size_t height, stream): * """ Initializes or sets device memory to a value. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_271cudaMemset2DAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_270cudaMemset2DAsync, "cudaMemset2DAsync(devPtr, size_t pitch, int value, size_t width, size_t height, stream)\n\nInitializes or sets device memory to a value.\n\nSets to the specified value `value` a matrix (`height` rows of `width`\nbytes each) pointed to by `dstPtr`. `pitch` is the width in bytes of\nthe 2D array pointed to by `dstPtr`, including any padding added to the\nend of each row. This function performs fastest when the pitch is one\nthat has been passed back by :py:obj:`~.cudaMallocPitch()`.\n\n:py:obj:`~.cudaMemset2DAsync()` is asynchronous with respect to the\nhost, so the call may return before the memset is complete. The\noperation can optionally be associated to a stream by passing a non-\nzero `stream` argument. 
If `stream` is non-zero, the operation may\noverlap with operations in other streams.\n\nThe device version of this function only handles device to device\ncopies and cannot be given local or shared pointers.\n\nParameters\n----------\ndevPtr : Any\n Pointer to 2D device memory\npitch : size_t\n Pitch in bytes of 2D device memory(Unused if `height` is 1)\nvalue : int\n Value to set for each byte of specified memory\nwidth : size_t\n Width of matrix set (columns in bytes)\nheight : size_t\n Height of matrix set (rows)\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`,\n\nSee Also\n--------\n:py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset3DAsync`, :py:obj:`~.cuMemsetD2D8Async`, :py:obj:`~.cuMemsetD2D16Async`, :py:obj:`~.cuMemsetD2D32Async`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_271cudaMemset2DAsync = {"cudaMemset2DAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_271cudaMemset2DAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_270cudaMemset2DAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_271cudaMemset2DAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; size_t __pyx_v_pitch; int __pyx_v_value; size_t __pyx_v_width; size_t __pyx_v_height; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[6] = {0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemset2DAsync 
(wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_pitch_2,&__pyx_mstate_global->__pyx_n_u_value,&__pyx_mstate_global->__pyx_n_u_width_2,&__pyx_mstate_global->__pyx_n_u_height_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22367, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 22367, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22367, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22367, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22367, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22367, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22367, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, 
__pyx_kwds_len, "cudaMemset2DAsync", 0) < (0)) __PYX_ERR(0, 22367, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 6; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemset2DAsync", 1, 6, 6, i); __PYX_ERR(0, 22367, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 6)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22367, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22367, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22367, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22367, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22367, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 22367, __pyx_L3_error) } __pyx_v_devPtr = values[0]; __pyx_v_pitch = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_pitch == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22368, __pyx_L3_error) __pyx_v_value = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_value == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22368, __pyx_L3_error) __pyx_v_width = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_width == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22368, __pyx_L3_error) __pyx_v_height = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_height == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22368, __pyx_L3_error) __pyx_v_stream = values[5]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemset2DAsync", 1, 6, 6, __pyx_nargs); __PYX_ERR(0, 22367, 
__pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemset2DAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_270cudaMemset2DAsync(__pyx_self, __pyx_v_devPtr, __pyx_v_pitch, __pyx_v_value, __pyx_v_width, __pyx_v_height, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_270cudaMemset2DAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr, size_t __pyx_v_pitch, int __pyx_v_value, size_t __pyx_v_width, size_t __pyx_v_height, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemset2DAsync", 0); /* "cuda/bindings/runtime.pyx":22411 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22412 * cdef cyruntime.cudaStream_t cystream * if stream is None: 
* pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":22411 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22413 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22414 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":22413 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22416 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); 
/* NOTE(review): fallback path - coerce an arbitrary 'stream' object by calling cudaStream_t(stream) and then int(...) on the result. */
__pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22416, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":22417 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22417, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22418 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_devPtr}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22418, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cydevPtr = 
((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":22419 * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemset2DAsync(cydevPtr_ptr, pitch, value, width, height, cystream) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22419, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22419, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22420 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemset2DAsync(cydevPtr_ptr, pitch, value, width, height, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22421 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaMemset2DAsync(cydevPtr_ptr, pitch, value, width, height, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemset2DAsync(__pyx_v_cydevPtr_ptr, __pyx_v_pitch, __pyx_v_value, __pyx_v_width, __pyx_v_height, __pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22421, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":22420 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = 
cyruntime.cudaMemset2DAsync(cydevPtr_ptr, pitch, value, width, height, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":22422 * with nogil: * err = cyruntime.cudaMemset2DAsync(cydevPtr_ptr, pitch, value, width, height, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22422, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 22422, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22367 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset2DAsync(devPtr, size_t pitch, int value, size_t width, size_t height, stream): * """ Initializes or sets device memory to a value. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemset2DAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22424 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset3DAsync(pitchedDevPtr not None : cudaPitchedPtr, int value, extent not None : cudaExtent, stream): * """ Initializes or sets device memory to a value. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_273cudaMemset3DAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_272cudaMemset3DAsync, "cudaMemset3DAsync(cudaPitchedPtr pitchedDevPtr: cudaPitchedPtr, int value, cudaExtent extent: cudaExtent, stream)\n\nInitializes or sets device memory to a value.\n\nInitializes each element of a 3D array to the specified value `value`.\nThe object to initialize is defined by `pitchedDevPtr`. The `pitch`\nfield of `pitchedDevPtr` is the width in memory in bytes of the 3D\narray pointed to by `pitchedDevPtr`, including any padding added to the\nend of each row. The `xsize` field specifies the logical width of each\nrow in bytes, while the `ysize` field specifies the height of each 2D\nslice in rows. 
The `pitch` field of `pitchedDevPtr` is ignored when\n`height` and `depth` are both equal to 1.\n\nThe extents of the initialized region are specified as a `width` in\nbytes, a `height` in rows, and a `depth` in slices.\n\nExtents with `width` greater than or equal to the `xsize` of\n`pitchedDevPtr` may perform significantly faster than extents narrower\nthan the `xsize`. Secondarily, extents with `height` equal to the\n`ysize` of `pitchedDevPtr` will perform faster than when the `height`\nis shorter than the `ysize`.\n\nThis function performs fastest when the `pitchedDevPtr` has been\nallocated by :py:obj:`~.cudaMalloc3D()`.\n\n:py:obj:`~.cudaMemset3DAsync()` is asynchronous with respect to the\nhost, so the call may return before the memset is complete. The\noperation can optionally be associated to a stream by passing a non-\nzero `stream` argument. If `stream` is non-zero, the operation may\noverlap with operations in other streams.\n\nThe device version of this function only handles device to device\ncopies and cannot be given local or shared pointers.\n\nParameters\n----------\npitchedDevPtr : :py:obj:`~.cudaPitchedPtr`\n Pointer to pitched device memory\nvalue : int\n Value to set for each byte of specified memory\nextent : :py:obj:`~.cudaExtent`\n Size parameters for where to set device memory (`width` field in\n bytes)\nstream : :py:obj:`~.CUstream` o""r :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`,\n\nSee Also\n--------\n:py:obj:`~.cudaMemset`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaMemset3D`, :py:obj:`~.cudaMemsetAsync`, :py:obj:`~.cudaMemset2DAsync`, :py:obj:`~.cudaMalloc3D`, :py:obj:`~.make_cudaPitchedPtr`, :py:obj:`~.make_cudaExtent`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_273cudaMemset3DAsync = {"cudaMemset3DAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_273cudaMemset3DAsync, 
__Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_272cudaMemset3DAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_273cudaMemset3DAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaPitchedPtr *__pyx_v_pitchedDevPtr = 0; int __pyx_v_value; struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemset3DAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pitchedDevPtr,&__pyx_mstate_global->__pyx_n_u_value,&__pyx_mstate_global->__pyx_n_u_extent_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22424, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22424, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22424, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22424, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22424, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemset3DAsync", 0) < (0)) __PYX_ERR(0, 22424, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemset3DAsync", 1, 4, 4, i); __PYX_ERR(0, 22424, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22424, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22424, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22424, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22424, __pyx_L3_error) } __pyx_v_pitchedDevPtr = ((struct 
__pyx_obj_4cuda_8bindings_7runtime_cudaPitchedPtr *)values[0]); __pyx_v_value = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_value == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22425, __pyx_L3_error) __pyx_v_extent = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *)values[2]); __pyx_v_stream = values[3]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemset3DAsync", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 22424, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemset3DAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pitchedDevPtr), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaPitchedPtr, 0, "pitchedDevPtr", 0))) __PYX_ERR(0, 22425, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_extent), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaExtent, 0, "extent", 0))) __PYX_ERR(0, 22425, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_272cudaMemset3DAsync(__pyx_self, __pyx_v_pitchedDevPtr, __pyx_v_value, __pyx_v_extent, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_272cudaMemset3DAsync(CYTHON_UNUSED PyObject *__pyx_self, struct 
__pyx_obj_4cuda_8bindings_7runtime_cudaPitchedPtr *__pyx_v_pitchedDevPtr, int __pyx_v_value, struct __pyx_obj_4cuda_8bindings_7runtime_cudaExtent *__pyx_v_extent, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemset3DAsync", 0); /* "cuda/bindings/runtime.pyx":22480 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22481 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":22480 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22482 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; 
/* NOTE(review): __pyx_t_1 is true when 'stream' is already a cudaStream_t or driver.CUstream instance; the else branch below coerces any other object through cudaStream_t(stream). */
if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22483 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":22482 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22485 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22485, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22485, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":22486 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemset3DAsync(pitchedDevPtr._pvt_ptr[0], value, extent._pvt_ptr[0], cystream) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == 
(unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22486, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22487 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemset3DAsync(pitchedDevPtr._pvt_ptr[0], value, extent._pvt_ptr[0], cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22488 * cystream = pstream * with nogil: * err = cyruntime.cudaMemset3DAsync(pitchedDevPtr._pvt_ptr[0], value, extent._pvt_ptr[0], cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemset3DAsync((__pyx_v_pitchedDevPtr->_pvt_ptr[0]), __pyx_v_value, (__pyx_v_extent->_pvt_ptr[0]), __pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22488, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":22487 * pstream = int(cudaStream_t(stream)) * cystream = pstream * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemset3DAsync(pitchedDevPtr._pvt_ptr[0], value, extent._pvt_ptr[0], cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":22489 * with nogil: * err = cyruntime.cudaMemset3DAsync(pitchedDevPtr._pvt_ptr[0], value, extent._pvt_ptr[0], cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
/* NOTE(review): map the cudaError_t return code to its Python enum member via the _dict_cudaError_t module-level lookup, then wrap it in a 1-tuple for the Python caller. */
__pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 22489, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22424 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemset3DAsync(pitchedDevPtr not None : cudaPitchedPtr, int value, extent not None : cudaExtent, stream): * """ Initializes or sets device memory to a value. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemset3DAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22491 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPrefetchAsync(devPtr, size_t count, int dstDevice, stream): * """ Prefetches memory to the specified destination device. 
*/
/* NOTE(review): Machine-generated Cython glue for the Python-level binding
 * cuda.bindings.runtime.cudaMemPrefetchAsync (generated from
 * cuda/bindings/runtime.pyx around line 22491).  Behavioral changes belong in
 * the .pyx source / the Cython code generator, not in this C file; only
 * comments are added here.
 *
 * NOTE(review): In the keyword-length check below,
 * `if (unlikely(__pyx_kwds_len) < 0)` wraps only the operand in unlikely(),
 * and since unlikely(x) normalizes its argument to 0/1 the `< 0` comparison
 * can never be true — a negative (failing) result from
 * __Pyx_NumKwargs_FASTCALL would not be caught here.  Presumably intended as
 * `unlikely(__pyx_kwds_len < 0)`; TODO confirm against the generating Cython
 * version and fix upstream rather than here.
 */
/* Python wrapper */
/* Forward declaration of the METH_FASTCALL-or-tuple wrapper entry point. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_275cudaMemPrefetchAsync(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Python-visible docstring (embedded signature on the first line). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_274cudaMemPrefetchAsync, "cudaMemPrefetchAsync(devPtr, size_t count, int dstDevice, stream)\n\nPrefetches memory to the specified destination device.\n\nPrefetches memory to the specified destination device. `devPtr` is the\nbase device pointer of the memory to be prefetched and `dstDevice` is\nthe destination device. `count` specifies the number of bytes to copy.\n`stream` is the stream in which the operation is enqueued. The memory\nrange must refer to managed memory allocated via\n:py:obj:`~.cudaMallocManaged` or declared via managed variables, or it\nmay also refer to system-allocated memory on systems with non-zero\ncudaDevAttrPageableMemoryAccess.\n\nPassing in cudaCpuDeviceId for `dstDevice` will prefetch the data to\nhost memory. If `dstDevice` is a GPU, then the device attribute\n:py:obj:`~.cudaDevAttrConcurrentManagedAccess` must be non-zero.\nAdditionally, `stream` must be associated with a device that has a non-\nzero value for the device attribute\n:py:obj:`~.cudaDevAttrConcurrentManagedAccess`.\n\nThe start address and end address of the memory range will be rounded\ndown and rounded up respectively to be aligned to CPU page size before\nthe prefetch operation is enqueued in the stream.\n\nIf no physical memory has been allocated for this region, then this\nmemory region will be populated and mapped on the destination device.\nIf there's insufficient memory to prefetch the desired region, the\nUnified Memory driver may evict pages from other\n:py:obj:`~.cudaMallocManaged` allocations to host memory in order to\nmake room. Device memory allocated using :py:obj:`~.cudaMalloc` or\n:py:obj:`~.cudaMallocArray` will not be evicted.\n\nBy default, any mappings to the previous location of the migrated pages\nare removed and mappings for the new location are only setup on\n`dstDevice`. The exact behavior however also depends on the settings\napplied to this memory range via :py:obj:`~.cudaMemAdvise` as described\nbelow:\n\nIf :py:obj:`~.cudaMemAdviseSetReadMostly` was set on any subset ""of this\nmemory range, then that subset will create a read-only copy of the\npages on `dstDevice`.\n\nIf :py:obj:`~.cudaMemAdviseSetPreferredLocation` was called on any\nsubset of this memory range, then the pages will be migrated to\n`dstDevice` even if `dstDevice` is not the preferred location of any\npages in the memory range.\n\nIf :py:obj:`~.cudaMemAdviseSetAccessedBy` was called on any subset of\nthis memory range, then mappings to those pages from all the\nappropriate processors are updated to refer to the new location if\nestablishing such a mapping is possible. Otherwise, those mappings are\ncleared.\n\nNote that this API is not required for functionality and only serves to\nimprove performance by allowing the application to migrate data to a\nsuitable location before it is accessed. Memory accesses to this range\nare always coherent and are allowed even when the data is actively\nbeing migrated.\n\nNote that this function is asynchronous with respect to the host and\nall work on other devices.\n\nParameters\n----------\ndevPtr : Any\n Pointer to be prefetched\ncount : size_t\n Size in bytes\ndstDevice : int\n Destination device to prefetch to\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream to enqueue prefetch operation\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemAdvise`, :py:obj:`~.cudaMemAdvise_v2` :py:obj:`~.cuMemPrefetchAsync`");
/* Method-table entry binding the wrapper and docstring under the public name. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_275cudaMemPrefetchAsync = {"cudaMemPrefetchAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_275cudaMemPrefetchAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_274cudaMemPrefetchAsync};
/* Wrapper: unpacks (devPtr, count, dstDevice, stream) from positional and/or
 * keyword arguments, converts count/dstDevice to C types, then dispatches to
 * the implementation __pyx_pf_..._274cudaMemPrefetchAsync. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_275cudaMemPrefetchAsync(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
PyObject *__pyx_v_devPtr = 0;
size_t __pyx_v_count;
int __pyx_v_dstDevice;
PyObject *__pyx_v_stream = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[4] = {0,0,0,0};
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cudaMemPrefetchAsync (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
{
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_dstDevice_2,&__pyx_mstate_global->__pyx_n_u_stream,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
/* See NOTE(review) above: this `< 0` check is dead as written. */
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22491, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
/* Mixed positional + keyword call: capture positionals, then merge keywords. */
switch (__pyx_nargs) {
case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22491, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22491, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22491, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22491, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPrefetchAsync", 0) < (0)) __PYX_ERR(0, 22491, __pyx_L3_error)
/* All four parameters are required; report the first missing one. */
for (Py_ssize_t i = __pyx_nargs; i < 4; i++) {
if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPrefetchAsync", 1, 4, 4, i); __PYX_ERR(0, 22491, __pyx_L3_error) }
}
} else if (unlikely(__pyx_nargs != 4)) {
goto __pyx_L5_argtuple_error;
} else {
/* Purely positional call with exactly four arguments. */
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22491, __pyx_L3_error)
values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if
/* (continues the positional-argument unpacking begun on the previous line) */
(!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22491, __pyx_L3_error)
values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22491, __pyx_L3_error)
values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22491, __pyx_L3_error)
}
/* Convert count (size_t) and dstDevice (int); devPtr/stream stay PyObject*. */
__pyx_v_devPtr = values[0];
__pyx_v_count = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22492, __pyx_L3_error)
__pyx_v_dstDevice = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_dstDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22492, __pyx_L3_error)
__pyx_v_stream = values[3];
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaMemPrefetchAsync", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 22491, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
/* Argument-parsing failure: drop any argument references already captured. */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPrefetchAsync", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_274cudaMemPrefetchAsync(__pyx_self, __pyx_v_devPtr, __pyx_v_count, __pyx_v_dstDevice, __pyx_v_stream);
/* function exit code */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of cudaMemPrefetchAsync: normalizes `stream` to a C
 * cudaStream_t, wraps `devPtr` via _HelperInputVoidPtr, releases the GIL
 * around the cyruntime call, and returns a 1-tuple (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_274cudaMemPrefetchAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr, size_t __pyx_v_count, int __pyx_v_dstDevice, PyObject *__pyx_v_stream) {
cudaStream_t __pyx_v_cystream;
PyObject *__pyx_v_pstream = NULL;
struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL;
void *__pyx_v_cydevPtr_ptr;
cudaError_t __pyx_v_err;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
size_t __pyx_t_6;
__pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7;
cudaError_t __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cudaMemPrefetchAsync", 0);
/* runtime.pyx:22574 `if stream is None:` — None maps to the default stream 0. */
__pyx_t_1 = (__pyx_v_stream == Py_None);
if (__pyx_t_1) {
/* runtime.pyx:22575 `pstream = 0` */
__Pyx_INCREF(__pyx_mstate_global->__pyx_int_0);
__pyx_v_pstream = __pyx_mstate_global->__pyx_int_0;
goto __pyx_L3;
}
/* runtime.pyx:22576 `elif isinstance(stream, (cudaStream_t,driver.CUstream)):` */
__pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t);
if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; }
__pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* runtime.pyx:22577 `pstream = int(stream)` */
__pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_pstream = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L3;
}
/* runtime.pyx:22579 `pstream = int(cudaStream_t(stream))` — anything else is
 * coerced through the cudaStream_t wrapper type first. */
/*else*/ {
__pyx_t_4 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t);
__pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t);
__pyx_t_6 = 1;
{
PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream};
__pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22579, __pyx_L1_error)
__Pyx_GOTREF((PyObject *)__pyx_t_3);
}
__pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_pstream = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
}
__pyx_L3:;
/* runtime.pyx:22580 `cystream = pstream` — Python int -> C handle value. */
__pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22580, __pyx_L1_error)
__pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7));
/* runtime.pyx:22581 `cydevPtr = _HelperInputVoidPtr(devPtr)` */
__pyx_t_3 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr);
__pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr);
__pyx_t_6 = 1;
{
PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_devPtr};
__pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22581, __pyx_L1_error)
__Pyx_GOTREF((PyObject *)__pyx_t_5);
}
__pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5);
__pyx_t_5 = 0;
/* runtime.pyx:22582 `cdef void* cydevPtr_ptr = cydevPtr.cptr` */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22582, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22582, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7));
/* runtime.pyx:22583 `with nogil:` — release the GIL around the runtime call. */
{
PyThreadState *_save;
_save = NULL;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
/*try:*/ {
/* runtime.pyx:22584 `err = cyruntime.cudaMemPrefetchAsync(...)` */
__pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPrefetchAsync(__pyx_v_cydevPtr_ptr, __pyx_v_count, __pyx_v_dstDevice, __pyx_v_cystream);
if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22584, __pyx_L7_error)
__pyx_v_err = __pyx_t_8;
}
/*finally:*/ {
/*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; }
__pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; }
__pyx_L8:;
}
}
/* runtime.pyx:22585 `return (_dict_cudaError_t[err],)` — build the 1-tuple. */
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22585, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22585, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22585, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22585, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_3);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 22585, __pyx_L1_error);
__pyx_t_3 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
/* Error path: release temporaries and record the traceback frame. */
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPrefetchAsync", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
/* Common exit: release owned locals on both success and error paths. */
__Pyx_XDECREF(__pyx_v_pstream);
__Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* NOTE(review): Machine-generated Cython glue for
 * cuda.bindings.runtime.cudaMemPrefetchAsync_v2 (runtime.pyx around line
 * 22587); do not hand-edit — regenerate from the .pyx source. */
/* Python wrapper */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_277cudaMemPrefetchAsync_v2(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Python-visible docstring (embedded signature on the first line). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_276cudaMemPrefetchAsync_v2, "cudaMemPrefetchAsync_v2(devPtr, size_t count, cudaMemLocation location: cudaMemLocation, unsigned int flags, stream)\n\nPrefetches memory to the specified destination location.\n\nPrefetches memory to the specified destination location. `devPtr` is\nthe base device pointer of the memory to be prefetched and `location`\nspecifies the destination location. `count` specifies the number of\nbytes to copy. `stream` is the stream in which the operation is\nenqueued. The memory range must refer to managed memory allocated via\n:py:obj:`~.cudaMallocManaged` or declared via managed variables, or it\nmay also refer to system-allocated memory on systems with non-zero\ncudaDevAttrPageableMemoryAccess.\n\nSpecifying :py:obj:`~.cudaMemLocationTypeDevice` for\n:py:obj:`~.cudaMemLocation.type` will prefetch memory to GPU specified\nby device ordinal :py:obj:`~.cudaMemLocation.id` which must have non-\nzero value for the device attribute\n:py:obj:`~.concurrentManagedAccess`. Additionally, `stream` must be\nassociated with a device that has a non-zero value for the device\nattribute :py:obj:`~.concurrentManagedAccess`. Specifying\n:py:obj:`~.cudaMemLocationTypeHost` as :py:obj:`~.cudaMemLocation.type`\nwill prefetch data to host memory. Applications can request prefetching\nmemory to a specific host NUMA node by specifying\n:py:obj:`~.cudaMemLocationTypeHostNuma` for\n:py:obj:`~.cudaMemLocation.type` and a valid host NUMA node id in\n:py:obj:`~.cudaMemLocation.id` Users can also request prefetching\nmemory to the host NUMA node closest to the current thread's CPU by\nspecifying :py:obj:`~.cudaMemLocationTypeHostNumaCurrent` for\n:py:obj:`~.cudaMemLocation.type`. Note when\n:py:obj:`~.cudaMemLocation.type` is etiher\n:py:obj:`~.cudaMemLocationTypeHost` OR\n:py:obj:`~.cudaMemLocationTypeHostNumaCurrent`,\n:py:obj:`~.cudaMemLocation.id` will be ignored.\n\nThe start address and end address of the memory range will be rounded\ndown and rounded up respectively to be aligned to CPU page si""ze before\nthe prefetch operation is enqueued in the stream.\n\nIf no physical memory has been allocated for this region, then this\nmemory region will be populated and mapped on the destination device.\nIf there's insufficient memory to prefetch the desired region, the\nUnified Memory driver may evict pages from other\n:py:obj:`~.cudaMallocManaged` allocations to host memory in order to\nmake room. Device memory allocated using :py:obj:`~.cudaMalloc` or\n:py:obj:`~.cudaMallocArray` will not be evicted.\n\nBy default, any mappings to the previous location of the migrated pages\nare removed and mappings for the new location are only setup on the\ndestination location. The exact behavior however also depends on the\nsettings applied to this memory range via :py:obj:`~.cuMemAdvise` as\ndescribed below:\n\nIf :py:obj:`~.cudaMemAdviseSetReadMostly` was set on any subset of this\nmemory range, then that subset will create a read-only copy of the\npages on destination location. If however the destination location is a\nhost NUMA node, then any pages of that subset that are already in\nanother host NUMA node will be transferred to the destination.\n\nIf :py:obj:`~.cudaMemAdviseSetPreferredLocation` was called on any\nsubset of this memory range, then the pages will be migrated to\n`location` even if `location` is not the preferred location of any\npages in the memory range.\n\nIf :py:obj:`~.cudaMemAdviseSetAccessedBy` was called on any subset of\nthis memory range, then mappings to those pages from all the\nappropriate processors are updated to refer to the new location if\nestablishing such a mapping is possible. Otherwise, those mappings are\ncleared.\n\nNote that this API is not required for functionality and only serves to\nimprove performance by allowing the application to migrate data to a\nsuitable location before it is accessed. Memory accesses to this range\nare always coherent and are allowed even when the data is actively\nbeing migrated.\n\nNote that this functi""on is asynchronous with respect to the host and\nall work on other devices.\n\nParameters\n----------\ndevPtr : Any\n Pointer to be prefetched\ncount : size_t\n Size in bytes\nlocation : :py:obj:`~.cudaMemLocation`\n location to prefetch to\nflags : unsigned int\n flags for future use, must be zero now.\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream to enqueue prefetch operation\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemAdvise`, :py:obj:`~.cudaMemAdvise_v2` :py:obj:`~.cuMemPrefetchAsync`");
/* Method-table entry binding the wrapper and docstring under the public name. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_277cudaMemPrefetchAsync_v2 = {"cudaMemPrefetchAsync_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_277cudaMemPrefetchAsync_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_276cudaMemPrefetchAsync_v2};
/* Wrapper: unpacks (devPtr, count, location, flags, stream), converts
 * count/flags to C types, type-checks `location`, then dispatches to
 * __pyx_pf_..._276cudaMemPrefetchAsync_v2. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_277cudaMemPrefetchAsync_v2(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
PyObject *__pyx_v_devPtr = 0;
size_t __pyx_v_count;
struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemLocation *__pyx_v_location = 0;
unsigned int __pyx_v_flags;
PyObject *__pyx_v_stream = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[5] = {0,0,0,0,0};
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cudaMemPrefetchAsync_v2 (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
{
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_location_2,&__pyx_mstate_global->__pyx_n_u_flags_2,&__pyx_mstate_global->__pyx_n_u_stream,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
/* NOTE(review): as in the other wrappers, `unlikely(__pyx_kwds_len) < 0`
 * can never be true (unlikely() yields 0/1); presumably intended as
 * `unlikely(__pyx_kwds_len < 0)` — fix in the generator, not here. */
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22587, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
/* Mixed positional + keyword call: capture positionals, then merge keywords. */
switch (__pyx_nargs) {
case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22587, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22587, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22587, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22587, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22587, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPrefetchAsync_v2", 0) < (0)) __PYX_ERR(0, 22587, __pyx_L3_error)
/* All five parameters are required; report the first missing one. */
for (Py_ssize_t i = __pyx_nargs; i < 5; i++) {
if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPrefetchAsync_v2", 1, 5, 5, i); __PYX_ERR(0, 22587, __pyx_L3_error) }
}
} else if (unlikely(__pyx_nargs != 5)) {
goto __pyx_L5_argtuple_error;
} else {
/* Purely positional call with exactly five arguments. */
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22587, __pyx_L3_error)
values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22587, __pyx_L3_error)
values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22587, __pyx_L3_error)
values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22587, __pyx_L3_error)
values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 22587, __pyx_L3_error)
}
/* Convert count (size_t) and flags (unsigned int); location is cast here and
 * type-checked after unpacking completes. */
__pyx_v_devPtr = values[0];
__pyx_v_count = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22588, __pyx_L3_error)
__pyx_v_location = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemLocation *)values[2]);
__pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[3]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22588, __pyx_L3_error)
__pyx_v_stream = values[4];
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaMemPrefetchAsync_v2", 1, 5, 5, __pyx_nargs); __PYX_ERR(0, 22587, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
__pyx_L3_error:;
/* Argument-parsing failure: drop any argument references already captured. */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPrefetchAsync_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* Type-check `location` against cudaMemLocation (statement continues on the
 * following source line). */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject
*)__pyx_v_location), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemLocation, 0, "location", 0))) __PYX_ERR(0, 22588, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_276cudaMemPrefetchAsync_v2(__pyx_self, __pyx_v_devPtr, __pyx_v_count, __pyx_v_location, __pyx_v_flags, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_276cudaMemPrefetchAsync_v2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr, size_t __pyx_v_count, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemLocation *__pyx_v_location, unsigned int __pyx_v_flags, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPrefetchAsync_v2", 0); /* "cuda/bindings/runtime.pyx":22688 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22689 * cdef cyruntime.cudaStream_t 
cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":22688 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22690 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":22691 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":22690 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":22693 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22693, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22693, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":22694 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22694, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22695 * pstream = int(cudaStream_t(stream)) * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_devPtr}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 
22695, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":22696 * cystream = pstream * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPrefetchAsync_v2(cydevPtr_ptr, count, location._pvt_ptr[0], flags, cystream) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":22697 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPrefetchAsync_v2(cydevPtr_ptr, count, location._pvt_ptr[0], flags, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22698 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaMemPrefetchAsync_v2(cydevPtr_ptr, count, location._pvt_ptr[0], flags, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPrefetchAsync_v2(__pyx_v_cydevPtr_ptr, __pyx_v_count, (__pyx_v_location->_pvt_ptr[0]), __pyx_v_flags, __pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22698, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":22697 * cydevPtr = 
_HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPrefetchAsync_v2(cydevPtr_ptr, count, location._pvt_ptr[0], flags, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":22699 * with nogil: * err = cyruntime.cudaMemPrefetchAsync_v2(cydevPtr_ptr, count, location._pvt_ptr[0], flags, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22699, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22699, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22699, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22699, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 22699, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22587 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPrefetchAsync_v2(devPtr, size_t count, location not None : cudaMemLocation, unsigned int flags, stream): * """ Prefetches memory to the specified destination location. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPrefetchAsync_v2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22701 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemAdvise(devPtr, size_t count, advice not None : cudaMemoryAdvise, int device): * """ Advise about the usage of a given memory range. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_279cudaMemAdvise(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_278cudaMemAdvise, "cudaMemAdvise(devPtr, size_t count, advice: cudaMemoryAdvise, int device)\n\nAdvise about the usage of a given memory range.\n\nAdvise the Unified Memory subsystem about the usage pattern for the\nmemory range starting at `devPtr` with a size of `count` bytes. The\nstart address and end address of the memory range will be rounded down\nand rounded up respectively to be aligned to CPU page size before the\nadvice is applied. The memory range must refer to managed memory\nallocated via :py:obj:`~.cudaMallocManaged` or declared via managed\nvariables. The memory range could also refer to system-allocated\npageable memory provided it represents a valid, host-accessible region\nof memory and all additional constraints imposed by `advice` as\noutlined below are also satisfied. 
Specifying an invalid system-\nallocated pageable memory range results in an error being returned.\n\nThe `advice` parameter can take the following values:\n\n- :py:obj:`~.cudaMemAdviseSetReadMostly`: This implies that the data is\n mostly going to be read from and only occasionally written to. Any\n read accesses from any processor to this region will create a read-\n only copy of at least the accessed pages in that processor's memory.\n Additionally, if :py:obj:`~.cudaMemPrefetchAsync` is called on this\n region, it will create a read-only copy of the data on the\n destination processor. If any processor writes to this region, all\n copies of the corresponding page will be invalidated except for the\n one where the write occurred. The `device` argument is ignored for\n this advice. Note that for a page to be read-duplicated, the\n accessing processor must either be the CPU or a GPU that has a non-\n zero value for the device attribute\n :py:obj:`~.cudaDevAttrConcurrentManagedAccess`. Also, if a context is\n created on a device that does not have the device attribute\n :py:obj:`~.cudaDevAttrConcurrentManagedAccess` set, then read-\n duplication will not occur until all such contexts ""are destroyed. If\n the memory region refers to valid system-allocated pageable memory,\n then the accessing device must have a non-zero value for the device\n attribute :py:obj:`~.cudaDevAttrPageableMemoryAccess` for a read-only\n copy to be created on that device. Note however that if the accessing\n device also has a non-zero value for the device attribute\n :py:obj:`~.cudaDevAttrPageableMemoryAccessUsesHostPageTables`, then\n setting this advice will not create a read-only copy when that device\n accesses this memory region.\n\n- :py:obj:`~.cudaMemAdviceUnsetReadMostly`: Undoes the effect of\n :py:obj:`~.cudaMemAdviceReadMostly` and also prevents the Unified\n Memory driver from attempting heuristic read-duplication on the\n memory range. 
Any read-duplicated copies of the data will be\n collapsed into a single copy. The location for the collapsed copy\n will be the preferred location if the page has a preferred location\n and one of the read-duplicated copies was resident at that location.\n Otherwise, the location chosen is arbitrary.\n\n- :py:obj:`~.cudaMemAdviseSetPreferredLocation`: This advice sets the\n preferred location for the data to be the memory belonging to\n `device`. Passing in cudaCpuDeviceId for `device` sets the preferred\n location as host memory. If `device` is a GPU, then it must have a\n non-zero value for the device attribute\n :py:obj:`~.cudaDevAttrConcurrentManagedAccess`. Setting the preferred\n location does not cause data to migrate to that location immediately.\n Instead, it guides the migration policy when a fault occurs on that\n memory region. If the data is already in its preferred location and\n the faulting processor can establish a mapping without requiring the\n data to be migrated, then data migration will be avoided. On the\n other hand, if the data is not in its preferred location or if a\n direct mapping cannot be established, then it will be migrated to the\n processor accessing it. It is impo""rtant to note that setting the\n preferred location does not prevent data prefetching done using\n :py:obj:`~.cudaMemPrefetchAsync`. Having a preferred location can\n override the page thrash detection and resolution logic in the\n Unified Memory driver. Normally, if a page is detected to be\n constantly thrashing between for example host and device memory, the\n page may eventually be pinned to host memory by the Unified Memory\n driver. But if the preferred location is set as device memory, then\n the page will continue to thrash indefinitely. 
If\n :py:obj:`~.cudaMemAdviseSetReadMostly` is also set on this memory\n region or any subset of it, then the policies associated with that\n advice will override the policies of this advice, unless read\n accesses from `device` will not result in a read-only copy being\n created on that device as outlined in description for the advice\n :py:obj:`~.cudaMemAdviseSetReadMostly`. If the memory region refers\n to valid system-allocated pageable memory, then `device` must have a\n non-zero value for the device attribute\n :py:obj:`~.cudaDevAttrPageableMemoryAccess`.\n\n- :py:obj:`~.cudaMemAdviseUnsetPreferredLocation`: Undoes the effect of\n :py:obj:`~.cudaMemAdviseSetPreferredLocation` and changes the\n preferred location to none.\n\n- :py:obj:`~.cudaMemAdviseSetAccessedBy`: This advice implies that the\n data will be accessed by `device`. Passing in\n :py:obj:`~.cudaCpuDeviceId` for `device` will set the advice for the\n CPU. If `device` is a GPU, then the device attribute\n :py:obj:`~.cudaDevAttrConcurrentManagedAccess` must be non-zero. This\n advice does not cause data migration and has no impact on the\n location of the data per se. Instead, it causes the data to always be\n mapped in the specified processor's page tables, as long as the\n location of the data permits a mapping to be established. If the data\n gets migrated for any reason, the mappings are updated accordingly.\n This advice is r""ecommended in scenarios where data locality is not\n important, but avoiding faults is. Consider for example a system\n containing multiple GPUs with peer-to-peer access enabled, where the\n data located on one GPU is occasionally accessed by peer GPUs. In\n such scenarios, migrating data over to the other GPUs is not as\n important because the accesses are infrequent and the overhead of\n migration may be too high. But preventing faults can still help\n improve performance, and so having a mapping set up in advance is\n useful. 
Note that on CPU access of this data, the data may be\n migrated to host memory because the CPU typically cannot access\n device memory directly. Any GPU that had the\n :py:obj:`~.cudaMemAdviceSetAccessedBy` flag set for this data will\n now have its mapping updated to point to the page in host memory. If\n :py:obj:`~.cudaMemAdviseSetReadMostly` is also set on this memory\n region or any subset of it, then the policies associated with that\n advice will override the policies of this advice. Additionally, if\n the preferred location of this memory region or any subset of it is\n also `device`, then the policies associated with\n :py:obj:`~.cudaMemAdviseSetPreferredLocation` will override the\n policies of this advice. If the memory region refers to valid system-\n allocated pageable memory, then `device` must have a non-zero value\n for the device attribute :py:obj:`~.cudaDevAttrPageableMemoryAccess`.\n Additionally, if `device` has a non-zero value for the device\n attribute\n :py:obj:`~.cudaDevAttrPageableMemoryAccessUsesHostPageTables`, then\n this call has no effect.\n\n- :py:obj:`~.cudaMemAdviseUnsetAccessedBy`: Undoes the effect of\n :py:obj:`~.cudaMemAdviseSetAccessedBy`. Any mappings to the data from\n `device` may be removed at any time causing accesses to result in\n non-fatal page faults. 
If the memory region refers to valid system-\n allocated pageable memory, then `device` must have a non-zero value""\n for the device attribute :py:obj:`~.cudaDevAttrPageableMemoryAccess`.\n Additionally, if `device` has a non-zero value for the device\n attribute\n :py:obj:`~.cudaDevAttrPageableMemoryAccessUsesHostPageTables`, then\n this call has no effect.\n\nParameters\n----------\ndevPtr : Any\n Pointer to memory to set the advice for\ncount : size_t\n Size in bytes of the memory range\nadvice : :py:obj:`~.cudaMemoryAdvise`\n Advice to be applied for the specified memory range\ndevice : int\n Device to apply the advice for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cuMemAdvise`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_279cudaMemAdvise = {"cudaMemAdvise", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_279cudaMemAdvise, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_278cudaMemAdvise}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_279cudaMemAdvise(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; size_t __pyx_v_count; PyObject *__pyx_v_advice = 0; int __pyx_v_device; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemAdvise (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if 
CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_advice,&__pyx_mstate_global->__pyx_n_u_device_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22701, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22701, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22701, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22701, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22701, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemAdvise", 0) < (0)) __PYX_ERR(0, 22701, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemAdvise", 1, 4, 4, i); __PYX_ERR(0, 22701, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22701, 
__pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22701, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22701, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22701, __pyx_L3_error) } __pyx_v_devPtr = values[0]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22702, __pyx_L3_error) __pyx_v_advice = values[2]; __pyx_v_device = __Pyx_PyLong_As_int(values[3]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 22702, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemAdvise", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 22701, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemAdvise", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_advice) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "advice"); __PYX_ERR(0, 22702, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_278cudaMemAdvise(__pyx_self, __pyx_v_devPtr, __pyx_v_count, __pyx_v_advice, __pyx_v_device); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); 
++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_278cudaMemAdvise(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr, size_t __pyx_v_count, PyObject *__pyx_v_advice, int __pyx_v_device) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; enum cudaMemoryAdvise __pyx_v_cyadvice; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; enum cudaMemoryAdvise __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemAdvise", 0); /* "cuda/bindings/runtime.pyx":22857 * :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cuMemAdvise` * """ * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_devPtr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22857, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); 
__pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":22858 * """ * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22858, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 22858, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":22859 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemAdvise(cydevPtr_ptr, count, cyadvice, device) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_advice, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22859, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = ((enum cudaMemoryAdvise)__Pyx_PyLong_As_enum__cudaMemoryAdvise(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 22859, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyadvice = __pyx_t_6; /* "cuda/bindings/runtime.pyx":22860 * cdef void* cydevPtr_ptr = cydevPtr.cptr * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemAdvise(cydevPtr_ptr, count, cyadvice, device) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":22861 * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value * with nogil: * err = cyruntime.cudaMemAdvise(cydevPtr_ptr, count, cyadvice, device) # <<<<<<<<<<<<<< * return 
(_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemAdvise(__pyx_v_cydevPtr_ptr, __pyx_v_count, __pyx_v_cyadvice, __pyx_v_device); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 22861, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":22860 * cdef void* cydevPtr_ptr = cydevPtr.cptr * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemAdvise(cydevPtr_ptr, count, cyadvice, device) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":22862 * with nogil: * err = cyruntime.cudaMemAdvise(cydevPtr_ptr, count, cyadvice, device) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22862, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22862, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22862, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22862, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 22862, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22701 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # 
<<<<<<<<<<<<<< * def cudaMemAdvise(devPtr, size_t count, advice not None : cudaMemoryAdvise, int device): * """ Advise about the usage of a given memory range. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemAdvise", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":22864 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemAdvise_v2(devPtr, size_t count, advice not None : cudaMemoryAdvise, location not None : cudaMemLocation): * """ Advise about the usage of a given memory range. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_281cudaMemAdvise_v2(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_280cudaMemAdvise_v2, "cudaMemAdvise_v2(devPtr, size_t count, advice: cudaMemoryAdvise, cudaMemLocation location: cudaMemLocation)\n\nAdvise about the usage of a given memory range.\n\nAdvise the Unified Memory subsystem about the usage pattern for the\nmemory range starting at `devPtr` with a size of `count` bytes. The\nstart address and end address of the memory range will be rounded down\nand rounded up respectively to be aligned to CPU page size before the\nadvice is applied. The memory range must refer to managed memory\nallocated via :py:obj:`~.cudaMallocManaged` or declared via managed\nvariables. The memory range could also refer to system-allocated\npageable memory provided it represents a valid, host-accessible region\nof memory and all additional constraints imposed by `advice` as\noutlined below are also satisfied. 
Specifying an invalid system-\nallocated pageable memory range results in an error being returned.\n\nThe `advice` parameter can take the following values:\n\n- :py:obj:`~.cudaMemAdviseSetReadMostly`: This implies that the data is\n mostly going to be read from and only occasionally written to. Any\n read accesses from any processor to this region will create a read-\n only copy of at least the accessed pages in that processor's memory.\n Additionally, if :py:obj:`~.cudaMemPrefetchAsync` or\n :py:obj:`~.cudaMemPrefetchAsync_v2` is called on this region, it will\n create a read-only copy of the data on the destination processor. If\n the target location for :py:obj:`~.cudaMemPrefetchAsync_v2` is a host\n NUMA node and a read-only copy already exists on another host NUMA\n node, that copy will be migrated to the targeted host NUMA node. If\n any processor writes to this region, all copies of the corresponding\n page will be invalidated except for the one where the write occurred.\n If the writing processor is the CPU and the preferred location of the\n page is a host NUMA node, then the page will also be migrated to that\n host NUMA node. The `location` ""argument is ignored for this advice.\n Note that for a page to be read-duplicated, the accessing processor\n must either be the CPU or a GPU that has a non-zero value for the\n device attribute :py:obj:`~.cudaDevAttrConcurrentManagedAccess`.\n Also, if a context is created on a device that does not have the\n device attribute :py:obj:`~.cudaDevAttrConcurrentManagedAccess` set,\n then read-duplication will not occur until all such contexts are\n destroyed. If the memory region refers to valid system-allocated\n pageable memory, then the accessing device must have a non-zero value\n for the device attribute :py:obj:`~.cudaDevAttrPageableMemoryAccess`\n for a read-only copy to be created on that device. 
Note however that\n if the accessing device also has a non-zero value for the device\n attribute\n :py:obj:`~.cudaDevAttrPageableMemoryAccessUsesHostPageTables`, then\n setting this advice will not create a read-only copy when that device\n accesses this memory region.\n\n- :py:obj:`~.cudaMemAdviceUnsetReadMostly`: Undoes the effect of\n :py:obj:`~.cudaMemAdviseSetReadMostly` and also prevents the Unified\n Memory driver from attempting heuristic read-duplication on the\n memory range. Any read-duplicated copies of the data will be\n collapsed into a single copy. The location for the collapsed copy\n will be the preferred location if the page has a preferred location\n and one of the read-duplicated copies was resident at that location.\n Otherwise, the location chosen is arbitrary. Note: The `location`\n argument is ignored for this advice.\n\n- :py:obj:`~.cudaMemAdviseSetPreferredLocation`: This advice sets the\n preferred location for the data to be the memory belonging to\n `location`. When :py:obj:`~.cudaMemLocation.type` is\n :py:obj:`~.cudaMemLocationTypeHost`, :py:obj:`~.cudaMemLocation.id`\n is ignored and the preferred location is set to be host memory. To\n set the preferred location to a specific host NUMA node, applications\n mu""st set :py:obj:`~.cudaMemLocation.type` to\n :py:obj:`~.cudaMemLocationTypeHostNuma` and\n :py:obj:`~.cudaMemLocation.id` must specify the NUMA ID of the host\n NUMA node. If :py:obj:`~.cudaMemLocation.type` is set to\n :py:obj:`~.cudaMemLocationTypeHostNumaCurrent`,\n :py:obj:`~.cudaMemLocation.id` will be ignored and the host NUMA node\n closest to the calling thread's CPU will be used as the preferred\n location. If :py:obj:`~.cudaMemLocation.type` is a\n :py:obj:`~.cudaMemLocationTypeDevice`, then\n :py:obj:`~.cudaMemLocation.id` must be a valid device ordinal and the\n device must have a non-zero value for the device attribute\n :py:obj:`~.cudaDevAttrConcurrentManagedAccess`. 
Setting the preferred\n location does not cause data to migrate to that location immediately.\n Instead, it guides the migration policy when a fault occurs on that\n memory region. If the data is already in its preferred location and\n the faulting processor can establish a mapping without requiring the\n data to be migrated, then data migration will be avoided. On the\n other hand, if the data is not in its preferred location or if a\n direct mapping cannot be established, then it will be migrated to the\n processor accessing it. It is important to note that setting the\n preferred location does not prevent data prefetching done using\n :py:obj:`~.cudaMemPrefetchAsync`. Having a preferred location can\n override the page thrash detection and resolution logic in the\n Unified Memory driver. Normally, if a page is detected to be\n constantly thrashing between for example host and device memory, the\n page may eventually be pinned to host memory by the Unified Memory\n driver. But if the preferred location is set as device memory, then\n the page will continue to thrash indefinitely. If\n :py:obj:`~.cudaMemAdviseSetReadMostly` is also set on this memory\n region or any subset of it, then the policies associated with that\n advice will override the policies of th""is advice, unless read\n accesses from `location` will not result in a read-only copy being\n created on that procesor as outlined in description for the advice\n :py:obj:`~.cudaMemAdviseSetReadMostly`. If the memory region refers\n to valid system-allocated pageable memory, and\n :py:obj:`~.cudaMemLocation.type` is\n :py:obj:`~.cudaMemLocationTypeDevice` then\n :py:obj:`~.cudaMemLocation.id` must be a valid device that has a non-\n zero alue for the device attribute\n :py:obj:`~.cudaDevAttrPageableMemoryAccess`.\n\n- :py:obj:`~.cudaMemAdviseUnsetPreferredLocation`: Undoes the effect of\n :py:obj:`~.cudaMemAdviseSetPreferredLocation` and changes the\n preferred location to none. 
The `location` argument is ignored for\n this advice.\n\n- :py:obj:`~.cudaMemAdviseSetAccessedBy`: This advice implies that the\n data will be accessed by processor `location`. The\n :py:obj:`~.cudaMemLocation.type` must be either\n :py:obj:`~.cudaMemLocationTypeDevice` with\n :py:obj:`~.cudaMemLocation.id` representing a valid device ordinal or\n :py:obj:`~.cudaMemLocationTypeHost` and\n :py:obj:`~.cudaMemLocation.id` will be ignored. All other location\n types are invalid. If :py:obj:`~.cudaMemLocation.id` is a GPU, then\n the device attribute :py:obj:`~.cudaDevAttrConcurrentManagedAccess`\n must be non-zero. This advice does not cause data migration and has\n no impact on the location of the data per se. Instead, it causes the\n data to always be mapped in the specified processor's page tables, as\n long as the location of the data permits a mapping to be established.\n If the data gets migrated for any reason, the mappings are updated\n accordingly. This advice is recommended in scenarios where data\n locality is not important, but avoiding faults is. Consider for\n example a system containing multiple GPUs with peer-to-peer access\n enabled, where the data located on one GPU is occasionally accessed\n by peer GPUs. In such scenarios, migrating data over to ""the other\n GPUs is not as important because the accesses are infrequent and the\n overhead of migration may be too high. But preventing faults can\n still help improve performance, and so having a mapping set up in\n advance is useful. Note that on CPU access of this data, the data may\n be migrated to host memory because the CPU typically cannot access\n device memory directly. Any GPU that had the\n :py:obj:`~.cudaMemAdviseSetAccessedBy` flag set for this data will\n now have its mapping updated to point to the page in host memory. 
If\n :py:obj:`~.cudaMemAdviseSetReadMostly` is also set on this memory\n region or any subset of it, then the policies associated with that\n advice will override the policies of this advice. Additionally, if\n the preferred location of this memory region or any subset of it is\n also `location`, then the policies associated with\n :py:obj:`~.CU_MEM_ADVISE_SET_PREFERRED_LOCATION` will override the\n policies of this advice. If the memory region refers to valid system-\n allocated pageable memory, and :py:obj:`~.cudaMemLocation.type` is\n :py:obj:`~.cudaMemLocationTypeDevice` then device in\n :py:obj:`~.cudaMemLocation.id` must have a non-zero value for the\n device attribute :py:obj:`~.cudaDevAttrPageableMemoryAccess`.\n Additionally, if :py:obj:`~.cudaMemLocation.id` has a non-zero value\n for the device attribute\n :py:obj:`~.cudaDevAttrPageableMemoryAccessUsesHostPageTables`, then\n this call has no effect.\n\n- :py:obj:`~.CU_MEM_ADVISE_UNSET_ACCESSED_BY`: Undoes the effect of\n :py:obj:`~.cudaMemAdviseSetAccessedBy`. Any mappings to the data from\n `location` may be removed at any time causing accesses to result in\n non-fatal page faults. 
If the memory region refers to valid system-\n allocated pageable memory, and :py:obj:`~.cudaMemLocation.type` is\n :py:obj:`~.cudaMemLocationTypeDevice` then device in\n :py:obj:`~.cudaMemLocation.id` must have a non-zero value for the\n device attribute :py:obj:`~.""cudaDevAttrPageableMemoryAccess`.\n Additionally, if :py:obj:`~.cudaMemLocation.id` has a non-zero value\n for the device attribute\n :py:obj:`~.cudaDevAttrPageableMemoryAccessUsesHostPageTables`, then\n this call has no effect.\n\nParameters\n----------\ndevPtr : Any\n Pointer to memory to set the advice for\ncount : size_t\n Size in bytes of the memory range\nadvice : :py:obj:`~.cudaMemoryAdvise`\n Advice to be applied for the specified memory range\nlocation : :py:obj:`~.cudaMemLocation`\n location to apply the advice for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cuMemAdvise`, :py:obj:`~.cuMemAdvise_v2`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_281cudaMemAdvise_v2 = {"cudaMemAdvise_v2", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_281cudaMemAdvise_v2, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_280cudaMemAdvise_v2}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_281cudaMemAdvise_v2(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; size_t __pyx_v_count; PyObject *__pyx_v_advice = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemLocation *__pyx_v_location = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const 
*__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemAdvise_v2 (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_advice,&__pyx_mstate_global->__pyx_n_u_location_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 22864, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22864, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22864, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22864, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22864, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemAdvise_v2", 0) < (0)) __PYX_ERR(0, 22864, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { 
__Pyx_RaiseArgtupleInvalid("cudaMemAdvise_v2", 1, 4, 4, i); __PYX_ERR(0, 22864, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 22864, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 22864, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 22864, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 22864, __pyx_L3_error) } __pyx_v_devPtr = values[0]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 22865, __pyx_L3_error) __pyx_v_advice = values[2]; __pyx_v_location = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemLocation *)values[3]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemAdvise_v2", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 22864, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemAdvise_v2", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_advice) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "advice"); __PYX_ERR(0, 22865, __pyx_L1_error) } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_location), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemLocation, 0, "location", 0))) __PYX_ERR(0, 22865, __pyx_L1_error) __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_280cudaMemAdvise_v2(__pyx_self, __pyx_v_devPtr, __pyx_v_count, __pyx_v_advice, __pyx_v_location); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_280cudaMemAdvise_v2(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr, size_t __pyx_v_count, PyObject *__pyx_v_advice, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemLocation *__pyx_v_location) { struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; enum cudaMemoryAdvise __pyx_v_cyadvice; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; enum cudaMemoryAdvise __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemAdvise_v2", 0); /* "cuda/bindings/runtime.pyx":23051 * :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpyPeer`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy3DPeerAsync`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cuMemAdvise`, :py:obj:`~.cuMemAdvise_v2` * """ * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_devPtr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23051, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":23052 * """ * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23052, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23052, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":23053 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemAdvise_v2(cydevPtr_ptr, count, cyadvice, location._pvt_ptr[0]) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_advice, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23053, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = ((enum cudaMemoryAdvise)__Pyx_PyLong_As_enum__cudaMemoryAdvise(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23053, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
__pyx_v_cyadvice = __pyx_t_6; /* "cuda/bindings/runtime.pyx":23054 * cdef void* cydevPtr_ptr = cydevPtr.cptr * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemAdvise_v2(cydevPtr_ptr, count, cyadvice, location._pvt_ptr[0]) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23055 * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value * with nogil: * err = cyruntime.cudaMemAdvise_v2(cydevPtr_ptr, count, cyadvice, location._pvt_ptr[0]) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemAdvise_v2(__pyx_v_cydevPtr_ptr, __pyx_v_count, __pyx_v_cyadvice, (__pyx_v_location->_pvt_ptr[0])); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23055, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":23054 * cdef void* cydevPtr_ptr = cydevPtr.cptr * cdef cyruntime.cudaMemoryAdvise cyadvice = advice.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemAdvise_v2(cydevPtr_ptr, count, cyadvice, location._pvt_ptr[0]) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":23056 * with nogil: * err = cyruntime.cudaMemAdvise_v2(cydevPtr_ptr, count, cyadvice, location._pvt_ptr[0]) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23056, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 
23056, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23056, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23056, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 23056, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":22864 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemAdvise_v2(devPtr, size_t count, advice not None : cudaMemoryAdvise, location not None : cudaMemLocation): * """ Advise about the usage of a given memory range. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemAdvise_v2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23058 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemRangeGetAttribute(size_t dataSize, attribute not None : cudaMemRangeAttribute, devPtr, size_t count): * """ Query an attribute of a given memory range. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_283cudaMemRangeGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_282cudaMemRangeGetAttribute, "cudaMemRangeGetAttribute(size_t dataSize, attribute: cudaMemRangeAttribute, devPtr, size_t count)\n\nQuery an attribute of a given memory range.\n\nQuery an attribute about the memory range starting at `devPtr` with a\nsize of `count` bytes. The memory range must refer to managed memory\nallocated via :py:obj:`~.cudaMallocManaged` or declared via managed\nvariables.\n\nThe `attribute` parameter can take the following values:\n\n- :py:obj:`~.cudaMemRangeAttributeReadMostly`: If this attribute is\n specified, `data` will be interpreted as a 32-bit integer, and\n `dataSize` must be 4. The result returned will be 1 if all pages in\n the given memory range have read-duplication enabled, or 0 otherwise.\n\n- :py:obj:`~.cudaMemRangeAttributePreferredLocation`: If this attribute\n is specified, `data` will be interpreted as a 32-bit integer, and\n `dataSize` must be 4. The result returned will be a GPU device id if\n all pages in the memory range have that GPU as their preferred\n location, or it will be cudaCpuDeviceId if all pages in the memory\n range have the CPU as their preferred location, or it will be\n cudaInvalidDeviceId if either all the pages don't have the same\n preferred location or some of the pages don't have a preferred\n location at all. Note that the actual location of the pages in the\n memory range at the time of the query may be different from the\n preferred location.\n\n- :py:obj:`~.cudaMemRangeAttributeAccessedBy`: If this attribute is\n specified, `data` will be interpreted as an array of 32-bit integers,\n and `dataSize` must be a non-zero multiple of 4. 
The result returned\n will be a list of device ids that had\n :py:obj:`~.cudaMemAdviceSetAccessedBy` set for that entire memory\n range. If any device does not have that advice set for the entire\n memory range, that device will not be included. If `data` is larger\n than the number of devices that have that advice set for that memory\n range, cudaInvalidDeviceId will be retu""rned in all the extra space\n provided. For ex., if `dataSize` is 12 (i.e. `data` has 3 elements)\n and only device 0 has the advice set, then the result returned will\n be { 0, cudaInvalidDeviceId, cudaInvalidDeviceId }. If `data` is\n smaller than the number of devices that have that advice set, then\n only as many devices will be returned as can fit in the array. There\n is no guarantee on which specific devices will be returned, however.\n\n- :py:obj:`~.cudaMemRangeAttributeLastPrefetchLocation`: If this\n attribute is specified, `data` will be interpreted as a 32-bit\n integer, and `dataSize` must be 4. The result returned will be the\n last location to which all pages in the memory range were prefetched\n explicitly via :py:obj:`~.cudaMemPrefetchAsync`. This will either be\n a GPU id or cudaCpuDeviceId depending on whether the last location\n for prefetch was a GPU or the CPU respectively. If any page in the\n memory range was never explicitly prefetched or if all pages were not\n prefetched to the same location, cudaInvalidDeviceId will be\n returned. Note that this simply returns the last location that the\n applicaton requested to prefetch the memory range to. It gives no\n indication as to whether the prefetch operation to that location has\n completed or even begun.\n\n- :py:obj:`~.cudaMemRangeAttributePreferredLocationType`: If this\n attribute is specified, `data` will be interpreted as a\n :py:obj:`~.cudaMemLocationType`, and `dataSize` must be\n sizeof(cudaMemLocationType). 
The :py:obj:`~.cudaMemLocationType`\n returned will be :py:obj:`~.cudaMemLocationTypeDevice` if all pages\n in the memory range have the same GPU as their preferred location, or\n :py:obj:`~.cudaMemLocationType` will be\n :py:obj:`~.cudaMemLocationTypeHost` if all pages in the memory range\n have the CPU as their preferred location, or or it will be\n :py:obj:`~.cudaMemLocationTypeHostNuma` if all the pages in the\n memory range have the same host NUMA n""ode ID as their preferred\n location or it will be :py:obj:`~.cudaMemLocationTypeInvalid` if\n either all the pages don't have the same preferred location or some\n of the pages don't have a preferred location at all. Note that the\n actual location type of the pages in the memory range at the time of\n the query may be different from the preferred location type.\n\n - :py:obj:`~.cudaMemRangeAttributePreferredLocationId`: If this\n attribute is specified, `data` will be interpreted as a 32-bit\n integer, and `dataSize` must be 4. If the\n :py:obj:`~.cudaMemRangeAttributePreferredLocationType` query for\n the same address range returns\n :py:obj:`~.cudaMemLocationTypeDevice`, it will be a valid device\n ordinal or if it returns :py:obj:`~.cudaMemLocationTypeHostNuma`,\n it will be a valid host NUMA node ID or if it returns any other\n location type, the id should be ignored.\n\n- :py:obj:`~.cudaMemRangeAttributeLastPrefetchLocationType`: If this\n attribute is specified, `data` will be interpreted as a\n :py:obj:`~.cudaMemLocationType`, and `dataSize` must be\n sizeof(cudaMemLocationType). The result returned will be the last\n location type to which all pages in the memory range were prefetched\n explicitly via :py:obj:`~.cuMemPrefetchAsync`. 
The\n :py:obj:`~.cudaMemLocationType` returned will be\n :py:obj:`~.cudaMemLocationTypeDevice` if the last prefetch location\n was the GPU or :py:obj:`~.cudaMemLocationTypeHost` if it was the CPU\n or :py:obj:`~.cudaMemLocationTypeHostNuma` if the last prefetch\n location was a specific host NUMA node. If any page in the memory\n range was never explicitly prefetched or if all pages were not\n prefetched to the same location, :py:obj:`~.CUmemLocationType` will\n be :py:obj:`~.cudaMemLocationTypeInvalid`. Note that this simply\n returns the last location type that the application requested to\n prefetch the memory range to. It gives no indication as to whether\n the prefetch operatio""n to that location has completed or even begun.\n\n - :py:obj:`~.cudaMemRangeAttributeLastPrefetchLocationId`: If this\n attribute is specified, `data` will be interpreted as a 32-bit\n integer, and `dataSize` must be 4. If the\n :py:obj:`~.cudaMemRangeAttributeLastPrefetchLocationType` query for\n the same address range returns\n :py:obj:`~.cudaMemLocationTypeDevice`, it will be a valid device\n ordinal or if it returns :py:obj:`~.cudaMemLocationTypeHostNuma`,\n it will be a valid host NUMA node ID or if it returns any other\n location type, the id should be ignored.\n\nParameters\n----------\ndataSize : size_t\n Array containing the size of data\nattribute : :py:obj:`~.cudaMemRangeAttribute`\n The attribute to query\ndevPtr : Any\n Start of the range to query\ncount : size_t\n Size of the range to query\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\ndata : Any\n A pointers to a memory location where the result of each attribute\n query will be written to.\n\nSee Also\n--------\n:py:obj:`~.cudaMemRangeGetAttributes`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cudaMemAdvise`, :py:obj:`~.cuMemRangeGetAttribute`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_283cudaMemRangeGetAttribute = {"cudaMemRangeGetAttribute", 
/* NOTE(review): Cython-GENERATED code (from cuda/bindings/runtime.pyx -- see the embedded pyx excerpts in the block comments below). Do not hand-edit; change the .pyx and regenerate. The comments added here are review annotations only and do not alter any code token. */
(PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_283cudaMemRangeGetAttribute, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_282cudaMemRangeGetAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_283cudaMemRangeGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { size_t __pyx_v_dataSize; PyObject *__pyx_v_attribute = 0; PyObject *__pyx_v_devPtr = 0; size_t __pyx_v_count; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemRangeGetAttribute (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dataSize,&__pyx_mstate_global->__pyx_n_u_attribute,&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_count,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
/* Keyword/positional unpacking for cudaMemRangeGetAttribute(dataSize, attribute, devPtr, count): exactly 4 arguments are required; any missing one raises via __Pyx_RaiseArgtupleInvalid at label L5/L3. */
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23058, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23058, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23058, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23058, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23058, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemRangeGetAttribute", 0) < (0)) __PYX_ERR(0, 23058, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemRangeGetAttribute", 1, 4, 4, i); __PYX_ERR(0, 23058, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23058, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23058, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23058, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23058, __pyx_L3_error) } __pyx_v_dataSize = 
/* dataSize and count are converted to size_t; attribute/devPtr stay PyObject*. attribute is additionally rejected when None (pyx signature says "attribute not None") before dispatching to the impl __pyx_pf_..._282. */
__Pyx_PyLong_As_size_t(values[0]); if (unlikely((__pyx_v_dataSize == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23059, __pyx_L3_error) __pyx_v_attribute = values[1]; __pyx_v_devPtr = values[2]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23059, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemRangeGetAttribute", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 23058, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemRangeGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_attribute) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attribute"); __PYX_ERR(0, 23059, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_282cudaMemRangeGetAttribute(__pyx_self, __pyx_v_dataSize, __pyx_v_attribute, __pyx_v_devPtr, __pyx_v_count); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_282cudaMemRangeGetAttribute(CYTHON_UNUSED PyObject *__pyx_self, size_t __pyx_v_dataSize, PyObject *__pyx_v_attribute, PyObject *__pyx_v_devPtr, size_t __pyx_v_count) { struct 
/* Impl of cudaMemRangeGetAttribute (pyx ~23194): builds _HelperCUmem_range_attribute(attribute, dataSize), which per the pyx excerpt owns the output buffer exposed via its .cptr attribute. */
__pyx_obj_4cuda_8bindings_7runtime__HelperCUmem_range_attribute *__pyx_v_cydata = 0; void *__pyx_v_cydata_ptr; enum cudaMemRangeAttribute __pyx_v_cyattribute; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; enum cudaMemRangeAttribute __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemRangeGetAttribute", 0); /* "cuda/bindings/runtime.pyx":23194 * :py:obj:`~.cudaMemRangeGetAttributes`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cudaMemAdvise`, :py:obj:`~.cuMemRangeGetAttribute` * """ * cdef _HelperCUmem_range_attribute cydata = _HelperCUmem_range_attribute(attribute, dataSize) # <<<<<<<<<<<<<< * cdef void* cydata_ptr = cydata.cptr * cdef cyruntime.cudaMemRangeAttribute cyattribute = attribute.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmem_range_attribute); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmem_range_attribute); __pyx_t_4 = __Pyx_PyLong_FromSize_t(__pyx_v_dataSize); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23194, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 1; { PyObject *__pyx_callargs[3] = {__pyx_t_2, __pyx_v_attribute, __pyx_t_4}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (3-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23194, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } 
__pyx_v_cydata = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperCUmem_range_attribute *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":23195 * """ * cdef _HelperCUmem_range_attribute cydata = _HelperCUmem_range_attribute(attribute, dataSize) * cdef void* cydata_ptr = cydata.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemRangeAttribute cyattribute = attribute.value * cydevPtr = _HelperInputVoidPtr(devPtr) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydata), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23195, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydata_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":23196 * cdef _HelperCUmem_range_attribute cydata = _HelperCUmem_range_attribute(attribute, dataSize) * cdef void* cydata_ptr = cydata.cptr * cdef cyruntime.cudaMemRangeAttribute cyattribute = attribute.value # <<<<<<<<<<<<<< * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_attribute, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23196, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = ((enum cudaMemRangeAttribute)__Pyx_PyLong_As_enum__cudaMemRangeAttribute(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23196, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyattribute = __pyx_t_7; /* "cuda/bindings/runtime.pyx":23197 * cdef void* cydata_ptr = cydata.cptr * cdef cyruntime.cudaMemRangeAttribute cyattribute = attribute.value * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject 
/* attribute.value is narrowed to the C enum cudaMemRangeAttribute; devPtr is wrapped in _HelperInputVoidPtr and its .cptr (read back as an unsigned integer) becomes the void* device pointer handed to the C API. */
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_devPtr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23197, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":23198 * cdef cyruntime.cudaMemRangeAttribute cyattribute = attribute.value * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemRangeGetAttribute(cydata_ptr, dataSize, cyattribute, cydevPtr_ptr, count) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23198, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":23199 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemRangeGetAttribute(cydata_ptr, dataSize, cyattribute, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23200 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = 
cyruntime.cudaMemRangeGetAttribute(cydata_ptr, dataSize, cyattribute, cydevPtr_ptr, count) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemRangeGetAttribute(__pyx_v_cydata_ptr, __pyx_v_dataSize, __pyx_v_cyattribute, __pyx_v_cydevPtr_ptr, __pyx_v_count); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23200, __pyx_L4_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":23199 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemRangeGetAttribute(cydata_ptr, dataSize, cyattribute, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":23201 * with nogil: * err = cyruntime.cudaMemRangeGetAttribute(cydata_ptr, dataSize, cyattribute, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cydata.pyObj()) */ __pyx_t_9 = (__pyx_v_err != cudaSuccess); if (__pyx_t_9) { /* "cuda/bindings/runtime.pyx":23202 * err = cyruntime.cudaMemRangeGetAttribute(cydata_ptr, dataSize, cyattribute, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], cydata.pyObj()) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = 
/* The C call runs with the GIL released (Py_UNBLOCK_THREADS/Py_BLOCK_THREADS bracket). Failure path returns the 2-tuple (_dict_cudaError_t[err], None); success path below returns (_dict_cudaError_t[err], cydata.pyObj()). */
__Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23202, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 23202, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 23202, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23201 * with nogil: * err = cyruntime.cudaMemRangeGetAttribute(cydata_ptr, dataSize, cyattribute, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cydata.pyObj()) */ } /* "cuda/bindings/runtime.pyx":23203 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cydata.pyObj()) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = ((PyObject *)__pyx_v_cydata); __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_4, NULL}; __pyx_t_3 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_pyObj, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | 
(1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23203, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 23203, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3) != (0)) __PYX_ERR(0, 23203, __pyx_L1_error); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23058 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemRangeGetAttribute(size_t dataSize, attribute not None : cudaMemRangeAttribute, devPtr, size_t count): * """ Query an attribute of a given memory range. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemRangeGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_cydata); __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23205 * return (_dict_cudaError_t[err], cydata.pyObj()) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemRangeGetAttributes(dataSizes : tuple[int] | list[int], attributes : Optional[tuple[cudaMemRangeAttribute] | list[cudaMemRangeAttribute]], size_t numAttributes, devPtr, size_t count): * """ Query attributes of a given memory range. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_285cudaMemRangeGetAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_284cudaMemRangeGetAttributes, "cudaMemRangeGetAttributes(dataSizes: tuple[int] | list[int], attributes: Optional[tuple[cudaMemRangeAttribute] | list[cudaMemRangeAttribute]], size_t numAttributes, devPtr, size_t count)\n\nQuery attributes of a given memory range.\n\nQuery attributes of the memory range starting at `devPtr` with a size\nof `count` bytes. The memory range must refer to managed memory\nallocated via :py:obj:`~.cudaMallocManaged` or declared via managed\nvariables. The `attributes` array will be interpreted to have\n`numAttributes` entries. The `dataSizes` array will also be interpreted\nto have `numAttributes` entries. The results of the query will be\nstored in `data`.\n\nThe list of supported attributes are given below. 
Please refer to\n:py:obj:`~.cudaMemRangeGetAttribute` for attribute descriptions and\nrestrictions.\n\n- :py:obj:`~.cudaMemRangeAttributeReadMostly`\n\n- :py:obj:`~.cudaMemRangeAttributePreferredLocation`\n\n- :py:obj:`~.cudaMemRangeAttributeAccessedBy`\n\n- :py:obj:`~.cudaMemRangeAttributeLastPrefetchLocation`\n\n- :: cudaMemRangeAttributePreferredLocationType\n\n- :: cudaMemRangeAttributePreferredLocationId\n\n- :: cudaMemRangeAttributeLastPrefetchLocationType\n\n- :: cudaMemRangeAttributeLastPrefetchLocationId\n\nParameters\n----------\ndataSizes : list[int]\n    Array containing the sizes of each result\nattributes : list[:py:obj:`~.cudaMemRangeAttribute`]\n    An array of attributes to query (numAttributes and the number of\n    attributes in this array should match)\nnumAttributes : size_t\n    Number of attributes to query\ndevPtr : Any\n    Start of the range to query\ncount : size_t\n    Size of the range to query\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\ndata : list[Any]\n    A two-dimensional array containing pointers to memory locations\n    where the result of each attribute query will be written to.\n\nSee Also\n--------\n:py:obj:`~.cudaMemRangeGetAttribute`, :py:obj:`~.cudaMemAdvise`, :py:obj:`~.""cudaMemPrefetchAsync`, :py:obj:`~.cuMemRangeGetAttributes`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_285cudaMemRangeGetAttributes = {"cudaMemRangeGetAttributes", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_285cudaMemRangeGetAttributes, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_284cudaMemRangeGetAttributes}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_285cudaMemRangeGetAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dataSizes = 0; PyObject *__pyx_v_attributes = 0; 
/* FASTCALL wrapper for cudaMemRangeGetAttributes(dataSizes, attributes, numAttributes, devPtr, count): 5 required arguments; numAttributes and count are converted to size_t, the rest stay PyObject*. Same unpack/error-label structure as the 4-arg wrapper above. */
size_t __pyx_v_numAttributes; PyObject *__pyx_v_devPtr = 0; size_t __pyx_v_count; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[5] = {0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemRangeGetAttributes (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dataSizes,&__pyx_mstate_global->__pyx_n_u_attributes,&__pyx_mstate_global->__pyx_n_u_numAttributes,&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_count,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23205, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23205, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23205, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23205, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23205, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23205, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemRangeGetAttributes", 0) < (0)) __PYX_ERR(0, 23205, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 5; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemRangeGetAttributes", 1, 5, 5, i); __PYX_ERR(0, 23205, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 5)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23205, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23205, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23205, __pyx_L3_error) values[3] = 
__Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23205, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23205, __pyx_L3_error) } __pyx_v_dataSizes = values[0]; __pyx_v_attributes = values[1]; __pyx_v_numAttributes = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numAttributes == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23206, __pyx_L3_error) __pyx_v_devPtr = values[3]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23206, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemRangeGetAttributes", 1, 5, 5, __pyx_nargs); __PYX_ERR(0, 23205, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemRangeGetAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_284cudaMemRangeGetAttributes(__pyx_self, __pyx_v_dataSizes, __pyx_v_attributes, __pyx_v_numAttributes, __pyx_v_devPtr, __pyx_v_count); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_2generator91(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":23264 * """ * attributes = [] if attributes is None else attributes * if not all(isinstance(_x, 
(cudaMemRangeAttribute)) for _x in attributes): # <<<<<<<<<<<<<< * raise TypeError("Argument 'attributes' is not instance of type (expected tuple[cyruntime.cudaMemRangeAttribute] or list[cyruntime.cudaMemRangeAttribute]") * if not all(isinstance(_x, (int)) for _x in dataSizes): */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_91_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_91_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_91_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_91_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_91_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 23264, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_2generator91, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[91]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaMemRangeGetAttributes_locals, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 23264, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ 
/* Generator body for all(isinstance(_x, cudaMemRangeAttribute) for _x in attributes) (pyx line 23264). Cython has folded the all() into the generator: it returns Py_False on the first non-matching element and Py_True if the iterable is exhausted; no values are ever yielded. */
__pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemRangeGetAttributes.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_2generator91(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_91_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_91_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 23264, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 23264, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23264, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23264, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23264, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23264, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 23264, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_cudaMemRangeAttribute); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyObject_IsInstance(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 23264, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { 
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_5generator92(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":23266 * if not all(isinstance(_x, (cudaMemRangeAttribute)) for _x in attributes): * raise TypeError("Argument 'attributes' is not instance of type (expected tuple[cyruntime.cudaMemRangeAttribute] or list[cyruntime.cudaMemRangeAttribute]") * if not all(isinstance(_x, (int)) for _x in dataSizes): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dataSizes' is not instance of type (expected tuple[int] or list[int]") * pylist = [_HelperCUmem_range_attribute(pyattributes, pydataSizes) for (pyattributes, pydataSizes) in zip(attributes, dataSizes)] */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_3genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_92_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_92_genexpr 
/* Second genexpr (pyx line 23266): all(isinstance(_x, int) for _x in dataSizes), implemented below with PyLong_Check instead of a module lookup; same factory + early-exit generator shape as genexpr 91 above. */
*)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_92_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_92_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_92_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 23266, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_5generator92, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[92]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaMemRangeGetAttributes_locals, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 23266, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemRangeGetAttributes.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_5generator92(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_92_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_92_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); 
PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 23266, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 23266, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23266, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23266, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23266, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23266, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23266, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 23266, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = PyLong_Check(__pyx_cur_scope->__pyx_v__x); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); 
__Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23205 * return (_dict_cudaError_t[err], cydata.pyObj()) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemRangeGetAttributes(dataSizes : tuple[int] | list[int], attributes : Optional[tuple[cudaMemRangeAttribute] | list[cudaMemRangeAttribute]], size_t numAttributes, devPtr, size_t count): * """ Query attributes of a given memory range. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_284cudaMemRangeGetAttributes(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dataSizes, PyObject *__pyx_v_attributes, size_t __pyx_v_numAttributes, PyObject *__pyx_v_devPtr, size_t __pyx_v_count) { PyObject *__pyx_v_pylist = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper *__pyx_v_voidStarHelperdata = 0; void **__pyx_v_cyvoidStarHelper_ptr; std::vector __pyx_v_cydataSizes; std::vector __pyx_v_cyattributes; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_2generator91 = 0; PyObject *__pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_5generator92 = 0; PyObject *__pyx_10genexpr209__pyx_v_pyattributes = NULL; PyObject *__pyx_10genexpr209__pyx_v_pydataSizes = NULL; PyObject *__pyx_10genexpr210__pyx_v_pyattributes = NULL; PyObject *__pyx_10genexpr211__pyx_v_obj = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; 
PyObject *(*__pyx_t_9)(PyObject *); PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *(*__pyx_t_12)(PyObject *); __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_13; std::vector __pyx_t_14; std::vector __pyx_t_15; cudaError_t __pyx_t_16; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemRangeGetAttributes", 0); __Pyx_INCREF(__pyx_v_attributes); /* "cuda/bindings/runtime.pyx":23263 * :py:obj:`~.cudaMemRangeGetAttribute`, :py:obj:`~.cudaMemAdvise`, :py:obj:`~.cudaMemPrefetchAsync`, :py:obj:`~.cuMemRangeGetAttributes` * """ * attributes = [] if attributes is None else attributes # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaMemRangeAttribute)) for _x in attributes): * raise TypeError("Argument 'attributes' is not instance of type (expected tuple[cyruntime.cudaMemRangeAttribute] or list[cyruntime.cudaMemRangeAttribute]") */ __pyx_t_2 = (__pyx_v_attributes == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_attributes); __pyx_t_1 = __pyx_v_attributes; } __Pyx_DECREF_SET(__pyx_v_attributes, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":23264 * """ * attributes = [] if attributes is None else attributes * if not all(isinstance(_x, (cudaMemRangeAttribute)) for _x in attributes): # <<<<<<<<<<<<<< * raise TypeError("Argument 'attributes' is not instance of type (expected tuple[cyruntime.cudaMemRangeAttribute] or list[cyruntime.cudaMemRangeAttribute]") * if not all(isinstance(_x, (int)) for _x in dataSizes): */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_genexpr(NULL, __pyx_v_attributes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23264, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 23264, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":23265 * attributes = [] if attributes is None else attributes * if not all(isinstance(_x, (cudaMemRangeAttribute)) for _x in attributes): * raise TypeError("Argument 'attributes' is not instance of type (expected tuple[cyruntime.cudaMemRangeAttribute] or list[cyruntime.cudaMemRangeAttribute]") # <<<<<<<<<<<<<< * if not all(isinstance(_x, (int)) for _x in dataSizes): * raise TypeError("Argument 'dataSizes' is not instance of type (expected tuple[int] or list[int]") */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_attributes_is_not_insta}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 23265, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":23264 * """ * attributes = [] if attributes is None else attributes * if not all(isinstance(_x, (cudaMemRangeAttribute)) for _x in attributes): # <<<<<<<<<<<<<< * raise TypeError("Argument 'attributes' is not instance of type (expected tuple[cyruntime.cudaMemRangeAttribute] or list[cyruntime.cudaMemRangeAttribute]") * if not all(isinstance(_x, (int)) for _x in dataSizes): */ } /* "cuda/bindings/runtime.pyx":23266 * if not all(isinstance(_x, (cudaMemRangeAttribute)) for _x in attributes): * raise TypeError("Argument 'attributes' is not instance of 
type (expected tuple[cyruntime.cudaMemRangeAttribute] or list[cyruntime.cudaMemRangeAttribute]") * if not all(isinstance(_x, (int)) for _x in dataSizes): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dataSizes' is not instance of type (expected tuple[int] or list[int]") * pylist = [_HelperCUmem_range_attribute(pyattributes, pydataSizes) for (pyattributes, pydataSizes) in zip(attributes, dataSizes)] */ __pyx_t_3 = __pyx_pf_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_3genexpr(NULL, __pyx_v_dataSizes); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23266, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_Generator_GetInlinedResult(__pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23266, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 23266, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_2 = (!__pyx_t_4); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":23267 * raise TypeError("Argument 'attributes' is not instance of type (expected tuple[cyruntime.cudaMemRangeAttribute] or list[cyruntime.cudaMemRangeAttribute]") * if not all(isinstance(_x, (int)) for _x in dataSizes): * raise TypeError("Argument 'dataSizes' is not instance of type (expected tuple[int] or list[int]") # <<<<<<<<<<<<<< * pylist = [_HelperCUmem_range_attribute(pyattributes, pydataSizes) for (pyattributes, pydataSizes) in zip(attributes, dataSizes)] * cdef _InputVoidPtrPtrHelper voidStarHelperdata = _InputVoidPtrPtrHelper(pylist) */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_1 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_mstate_global->__pyx_kp_u_Argument_dataSizes_is_not_instan}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23267, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 23267, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":23266 * if not all(isinstance(_x, (cudaMemRangeAttribute)) for _x in attributes): * raise TypeError("Argument 'attributes' is not instance of type (expected tuple[cyruntime.cudaMemRangeAttribute] or list[cyruntime.cudaMemRangeAttribute]") * if not all(isinstance(_x, (int)) for _x in dataSizes): # <<<<<<<<<<<<<< * raise TypeError("Argument 'dataSizes' is not instance of type (expected tuple[int] or list[int]") * pylist = [_HelperCUmem_range_attribute(pyattributes, pydataSizes) for (pyattributes, pydataSizes) in zip(attributes, dataSizes)] */ } /* "cuda/bindings/runtime.pyx":23268 * if not all(isinstance(_x, (int)) for _x in dataSizes): * raise TypeError("Argument 'dataSizes' is not instance of type (expected tuple[int] or list[int]") * pylist = [_HelperCUmem_range_attribute(pyattributes, pydataSizes) for (pyattributes, pydataSizes) in zip(attributes, dataSizes)] # <<<<<<<<<<<<<< * cdef _InputVoidPtrPtrHelper voidStarHelperdata = _InputVoidPtrPtrHelper(pylist) * cdef void** cyvoidStarHelper_ptr = voidStarHelperdata.cptr */ { /* enter inner scope */ __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_zip); __pyx_t_7 = __pyx_builtin_zip; __pyx_t_6 = 1; { PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_v_attributes, __pyx_v_dataSizes}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+__pyx_t_6, (3-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_1); } if (likely(PyList_CheckExact(__pyx_t_1)) || 
PyTuple_CheckExact(__pyx_t_1)) { __pyx_t_7 = __pyx_t_1; __Pyx_INCREF(__pyx_t_7); __pyx_t_8 = 0; __pyx_t_9 = NULL; } else { __pyx_t_8 = -1; __pyx_t_7 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_7); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23268, __pyx_L7_error) } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; for (;;) { if (likely(!__pyx_t_9)) { if (likely(PyList_CheckExact(__pyx_t_7))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_7); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23268, __pyx_L7_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } __pyx_t_1 = __Pyx_PyList_GetItemRef(__pyx_t_7, __pyx_t_8); ++__pyx_t_8; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_7); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23268, __pyx_L7_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_1 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_7, __pyx_t_8)); #else __pyx_t_1 = __Pyx_PySequence_ITEM(__pyx_t_7, __pyx_t_8); #endif ++__pyx_t_8; } if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23268, __pyx_L7_error) } else { __pyx_t_1 = __pyx_t_9(__pyx_t_7); if (unlikely(!__pyx_t_1)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 23268, __pyx_L7_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_1); if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { PyObject* sequence = __pyx_t_1; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(0, 23268, __pyx_L7_error) } #if CYTHON_ASSUME_SAFE_MACROS && 
!CYTHON_AVOID_BORROWED_REFS if (likely(PyTuple_CheckExact(sequence))) { __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __Pyx_INCREF(__pyx_t_3); __pyx_t_10 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_10); } else { __pyx_t_3 = __Pyx_PyList_GetItemRef(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_XGOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyList_GetItemRef(sequence, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_XGOTREF(__pyx_t_10); } #else __pyx_t_3 = __Pyx_PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_10); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { Py_ssize_t index = -1; __pyx_t_11 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_12 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_11); index = 0; __pyx_t_3 = __pyx_t_12(__pyx_t_11); if (unlikely(!__pyx_t_3)) goto __pyx_L10_unpacking_failed; __Pyx_GOTREF(__pyx_t_3); index = 1; __pyx_t_10 = __pyx_t_12(__pyx_t_11); if (unlikely(!__pyx_t_10)) goto __pyx_L10_unpacking_failed; __Pyx_GOTREF(__pyx_t_10); if (__Pyx_IternextUnpackEndCheck(__pyx_t_12(__pyx_t_11), 2) < (0)) __PYX_ERR(0, 23268, __pyx_L7_error) __pyx_t_12 = NULL; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L11_unpacking_done; __pyx_L10_unpacking_failed:; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_12 = NULL; if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); __PYX_ERR(0, 23268, __pyx_L7_error) __pyx_L11_unpacking_done:; } __Pyx_XDECREF_SET(__pyx_10genexpr209__pyx_v_pyattributes, __pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_10genexpr209__pyx_v_pydataSizes, __pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmem_range_attribute); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmem_range_attribute); __pyx_t_6 = 1; { PyObject *__pyx_callargs[3] = {__pyx_t_10, __pyx_10genexpr209__pyx_v_pyattributes, __pyx_10genexpr209__pyx_v_pydataSizes}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (3-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } if (unlikely(__Pyx_ListComp_Append(__pyx_t_5, (PyObject*)__pyx_t_1))) __PYX_ERR(0, 23268, __pyx_L7_error) __Pyx_DECREF((PyObject *)__pyx_t_1); __pyx_t_1 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_10genexpr209__pyx_v_pyattributes); __pyx_10genexpr209__pyx_v_pyattributes = 0; __Pyx_XDECREF(__pyx_10genexpr209__pyx_v_pydataSizes); 
__pyx_10genexpr209__pyx_v_pydataSizes = 0; goto __pyx_L13_exit_scope; __pyx_L7_error:; __Pyx_XDECREF(__pyx_10genexpr209__pyx_v_pyattributes); __pyx_10genexpr209__pyx_v_pyattributes = 0; __Pyx_XDECREF(__pyx_10genexpr209__pyx_v_pydataSizes); __pyx_10genexpr209__pyx_v_pydataSizes = 0; goto __pyx_L1_error; __pyx_L13_exit_scope:; } /* exit inner scope */ __pyx_v_pylist = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":23269 * raise TypeError("Argument 'dataSizes' is not instance of type (expected tuple[int] or list[int]") * pylist = [_HelperCUmem_range_attribute(pyattributes, pydataSizes) for (pyattributes, pydataSizes) in zip(attributes, dataSizes)] * cdef _InputVoidPtrPtrHelper voidStarHelperdata = _InputVoidPtrPtrHelper(pylist) # <<<<<<<<<<<<<< * cdef void** cyvoidStarHelper_ptr = voidStarHelperdata.cptr * cdef vector[size_t] cydataSizes = dataSizes */ __pyx_t_7 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_v_pylist}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23269, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_voidStarHelperdata = ((struct __pyx_obj_4cuda_8bindings_7runtime__InputVoidPtrPtrHelper *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":23270 * pylist = [_HelperCUmem_range_attribute(pyattributes, pydataSizes) for (pyattributes, pydataSizes) in zip(attributes, dataSizes)] * cdef _InputVoidPtrPtrHelper voidStarHelperdata = _InputVoidPtrPtrHelper(pylist) * cdef void** cyvoidStarHelper_ptr = voidStarHelperdata.cptr # <<<<<<<<<<<<<< * cdef vector[size_t] 
cydataSizes = dataSizes * cdef vector[cyruntime.cudaMemRangeAttribute] cyattributes = [pyattributes.value for pyattributes in (attributes)] */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_voidStarHelperdata), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23270, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_13 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_13 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23270, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyvoidStarHelper_ptr = ((void **)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_13)); /* "cuda/bindings/runtime.pyx":23271 * cdef _InputVoidPtrPtrHelper voidStarHelperdata = _InputVoidPtrPtrHelper(pylist) * cdef void** cyvoidStarHelper_ptr = voidStarHelperdata.cptr * cdef vector[size_t] cydataSizes = dataSizes # <<<<<<<<<<<<<< * cdef vector[cyruntime.cudaMemRangeAttribute] cyattributes = [pyattributes.value for pyattributes in (attributes)] * if numAttributes > len(dataSizes): raise RuntimeError("List is too small: " + str(len(dataSizes)) + " < " + str(numAttributes)) */ __pyx_t_14 = __pyx_convert_vector_from_py_size_t(__pyx_v_dataSizes); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23271, __pyx_L1_error) __pyx_v_cydataSizes = __PYX_STD_MOVE_IF_SUPPORTED(__pyx_t_14); /* "cuda/bindings/runtime.pyx":23272 * cdef void** cyvoidStarHelper_ptr = voidStarHelperdata.cptr * cdef vector[size_t] cydataSizes = dataSizes * cdef vector[cyruntime.cudaMemRangeAttribute] cyattributes = [pyattributes.value for pyattributes in (attributes)] # <<<<<<<<<<<<<< * if numAttributes > len(dataSizes): raise RuntimeError("List is too small: " + str(len(dataSizes)) + " < " + str(numAttributes)) * if numAttributes > len(attributes): raise RuntimeError("List is too small: " + str(len(attributes)) + " < " + str(numAttributes)) */ { /* enter inner scope */ __pyx_t_5 = PyList_New(0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23272, 
__pyx_L16_error) __Pyx_GOTREF(__pyx_t_5); if (likely(PyList_CheckExact(__pyx_v_attributes)) || PyTuple_CheckExact(__pyx_v_attributes)) { __pyx_t_1 = __pyx_v_attributes; __Pyx_INCREF(__pyx_t_1); __pyx_t_8 = 0; __pyx_t_9 = NULL; } else { __pyx_t_8 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_v_attributes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23272, __pyx_L16_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23272, __pyx_L16_error) } for (;;) { if (likely(!__pyx_t_9)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23272, __pyx_L16_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } __pyx_t_7 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_8); ++__pyx_t_8; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23272, __pyx_L16_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_8)); #else __pyx_t_7 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_8); #endif ++__pyx_t_8; } if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 23272, __pyx_L16_error) } else { __pyx_t_7 = __pyx_t_9(__pyx_t_1); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 23272, __pyx_L16_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_7); __Pyx_XDECREF_SET(__pyx_10genexpr210__pyx_v_pyattributes, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_10genexpr210__pyx_v_pyattributes, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 23272, __pyx_L16_error) __Pyx_GOTREF(__pyx_t_7); if 
(unlikely(__Pyx_ListComp_Append(__pyx_t_5, (PyObject*)__pyx_t_7))) __PYX_ERR(0, 23272, __pyx_L16_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_10genexpr210__pyx_v_pyattributes); __pyx_10genexpr210__pyx_v_pyattributes = 0; goto __pyx_L20_exit_scope; __pyx_L16_error:; __Pyx_XDECREF(__pyx_10genexpr210__pyx_v_pyattributes); __pyx_10genexpr210__pyx_v_pyattributes = 0; goto __pyx_L1_error; __pyx_L20_exit_scope:; } /* exit inner scope */ __pyx_t_15 = __pyx_convert_vector_from_py_enum__cudaMemRangeAttribute(__pyx_t_5); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23272, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyattributes = __PYX_STD_MOVE_IF_SUPPORTED(__pyx_t_15); /* "cuda/bindings/runtime.pyx":23273 * cdef vector[size_t] cydataSizes = dataSizes * cdef vector[cyruntime.cudaMemRangeAttribute] cyattributes = [pyattributes.value for pyattributes in (attributes)] * if numAttributes > len(dataSizes): raise RuntimeError("List is too small: " + str(len(dataSizes)) + " < " + str(numAttributes)) # <<<<<<<<<<<<<< * if numAttributes > len(attributes): raise RuntimeError("List is too small: " + str(len(attributes)) + " < " + str(numAttributes)) * cydevPtr = _HelperInputVoidPtr(devPtr) */ __pyx_t_8 = PyObject_Length(__pyx_v_dataSizes); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23273, __pyx_L1_error) __pyx_t_2 = (__pyx_v_numAttributes > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_7 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_dataSizes); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23273, __pyx_L1_error) __pyx_t_3 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); 
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_3, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_FromSize_t(__pyx_v_numAttributes); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 23273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_t_3}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23273, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 23273, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":23274 * cdef vector[cyruntime.cudaMemRangeAttribute] cyattributes = [pyattributes.value for pyattributes in (attributes)] * if numAttributes > len(dataSizes): raise RuntimeError("List is too small: " + str(len(dataSizes)) + " < " + str(numAttributes)) * if numAttributes > len(attributes): raise RuntimeError("List is too small: " + str(len(attributes)) + " < " + str(numAttributes)) # 
<<<<<<<<<<<<<< * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr */ __pyx_t_8 = PyObject_Length(__pyx_v_attributes); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23274, __pyx_L1_error) __pyx_t_2 = (__pyx_v_numAttributes > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_7 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_3 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_attributes); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23274, __pyx_L1_error) __pyx_t_1 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 23274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_1, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 23274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyLong_FromSize_t(__pyx_v_numAttributes); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_1); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_1}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, 
__pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23274, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 23274, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":23275 * if numAttributes > len(dataSizes): raise RuntimeError("List is too small: " + str(len(dataSizes)) + " < " + str(numAttributes)) * if numAttributes > len(attributes): raise RuntimeError("List is too small: " + str(len(attributes)) + " < " + str(numAttributes)) * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_devPtr}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23275, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":23276 * if numAttributes > len(attributes): raise RuntimeError("List is too small: " + str(len(attributes)) + " < " + str(numAttributes)) * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemRangeGetAttributes(cyvoidStarHelper_ptr, cydataSizes.data(), cyattributes.data(), numAttributes, cydevPtr_ptr, count) */ 
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_13 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_13 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23276, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_13)); /* "cuda/bindings/runtime.pyx":23277 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemRangeGetAttributes(cyvoidStarHelper_ptr, cydataSizes.data(), cyattributes.data(), numAttributes, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23278 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaMemRangeGetAttributes(cyvoidStarHelper_ptr, cydataSizes.data(), cyattributes.data(), numAttributes, cydevPtr_ptr, count) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_16 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemRangeGetAttributes(__pyx_v_cyvoidStarHelper_ptr, __pyx_v_cydataSizes.data(), __pyx_v_cyattributes.data(), __pyx_v_numAttributes, __pyx_v_cydevPtr_ptr, __pyx_v_count); if (unlikely(__pyx_t_16 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23278, __pyx_L24_error) __pyx_v_err = __pyx_t_16; } /* "cuda/bindings/runtime.pyx":23277 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemRangeGetAttributes(cyvoidStarHelper_ptr, cydataSizes.data(), cyattributes.data(), numAttributes, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { 
/*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L25; } __pyx_L24_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L25:; } } /* "cuda/bindings/runtime.pyx":23279 * with nogil: * err = cyruntime.cudaMemRangeGetAttributes(cyvoidStarHelper_ptr, cydataSizes.data(), cyattributes.data(), numAttributes, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], [obj.pyObj() for obj in pylist]) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":23280 * err = cyruntime.cudaMemRangeGetAttributes(cyvoidStarHelper_ptr, cydataSizes.data(), cyattributes.data(), numAttributes, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], [obj.pyObj() for obj in pylist]) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23280, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 23280, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, Py_None) != (0)) __PYX_ERR(0, 23280, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23279 * with 
nogil: * err = cyruntime.cudaMemRangeGetAttributes(cyvoidStarHelper_ptr, cydataSizes.data(), cyattributes.data(), numAttributes, cydevPtr_ptr, count) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], [obj.pyObj() for obj in pylist]) */ } /* "cuda/bindings/runtime.pyx":23281 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], [obj.pyObj() for obj in pylist]) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; { /* enter inner scope */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23281, __pyx_L29_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_v_pylist; __Pyx_INCREF(__pyx_t_1); __pyx_t_8 = 0; for (;;) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23281, __pyx_L29_error) #endif if (__pyx_t_8 >= __pyx_temp) break; } __pyx_t_7 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_8); ++__pyx_t_8; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 23281, __pyx_L29_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_XDECREF_SET(__pyx_10genexpr211__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_10 = __pyx_10genexpr211__pyx_v_obj; __Pyx_INCREF(__pyx_t_10); __pyx_t_6 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_10, NULL}; __pyx_t_7 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_pyObj, 
__pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 23281, __pyx_L29_error) __Pyx_GOTREF(__pyx_t_7); } if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_7))) __PYX_ERR(0, 23281, __pyx_L29_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_10genexpr211__pyx_v_obj); __pyx_10genexpr211__pyx_v_obj = 0; goto __pyx_L33_exit_scope; __pyx_L29_error:; __Pyx_XDECREF(__pyx_10genexpr211__pyx_v_obj); __pyx_10genexpr211__pyx_v_obj = 0; goto __pyx_L1_error; __pyx_L33_exit_scope:; } /* exit inner scope */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23281, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 23281, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3) != (0)) __PYX_ERR(0, 23281, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23205 * return (_dict_cudaError_t[err], cydata.pyObj()) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemRangeGetAttributes(dataSizes : tuple[int] | list[int], attributes : Optional[tuple[cudaMemRangeAttribute] | list[cudaMemRangeAttribute]], size_t numAttributes, devPtr, size_t count): * """ Query attributes of a given memory range. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemRangeGetAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pylist); __Pyx_XDECREF((PyObject *)__pyx_v_voidStarHelperdata); __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_2generator91); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_25cudaMemRangeGetAttributes_5generator92); __Pyx_XDECREF(__pyx_10genexpr209__pyx_v_pyattributes); __Pyx_XDECREF(__pyx_10genexpr209__pyx_v_pydataSizes); __Pyx_XDECREF(__pyx_10genexpr210__pyx_v_pyattributes); __Pyx_XDECREF(__pyx_10genexpr211__pyx_v_obj); __Pyx_XDECREF(__pyx_v_attributes); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23283 * return (_dict_cudaError_t[err], [obj.pyObj() for obj in pylist]) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyToArray(dst, size_t wOffset, size_t hOffset, src, size_t count, kind not None : cudaMemcpyKind): * """ Copies data between host and device. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_287cudaMemcpyToArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_286cudaMemcpyToArray, "cudaMemcpyToArray(dst, size_t wOffset, size_t hOffset, src, size_t count, kind: cudaMemcpyKind)\n\nCopies data between host and device.\n\n[Deprecated]\n\nCopies `count` bytes from the memory area pointed to by `src` to the\nCUDA array `dst` starting at `hOffset` rows and `wOffset` bytes from\nthe upper left corner, where `kind` specifies the direction of the\ncopy, and must be one of :py:obj:`~.cudaMemcpyHostToHost`,\n:py:obj:`~.cudaMemcpyHostToDevice`, :py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. 
However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing.\n\nParameters\n----------\ndst : :py:obj:`~.cudaArray_t`\n Destination memory address\nwOffset : size_t\n Destination starting X offset (columns in bytes)\nhOffset : size_t\n Destination starting Y offset (rows)\nsrc : Any\n Source memory address\ncount : size_t\n Size in bytes to copy\nkind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpyFromArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpyArrayToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpyToArrayAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpyFromArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyHtoA`, :py:obj:`~.cuMemcpyDtoA`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_287cudaMemcpyToArray = {"cudaMemcpyToArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_287cudaMemcpyToArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_286cudaMemcpyToArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_287cudaMemcpyToArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t __pyx_v_wOffset; size_t __pyx_v_hOffset; PyObject *__pyx_v_src = 0; size_t __pyx_v_count; PyObject *__pyx_v_kind = 0; #if !CYTHON_METH_FASTCALL 
/* NOTE(review): Cython-generated binding for runtime.pyx:23283 `cudaMemcpyToArray`
   (deprecated CUDA runtime API). Do not hand-edit this C file -- change the .pyx
   source and regenerate. Wrapper below unpacks 6 positional/keyword arguments
   (dst, wOffset, hOffset, src, count, kind), converts the size_t arguments via
   __Pyx_PyLong_As_size_t, rejects kind=None, and dispatches to the _pf_286 impl. */
CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[6] = {0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpyToArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_wOffset,&__pyx_mstate_global->__pyx_n_u_hOffset,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_kind_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23283, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23283, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23283, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23283, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23283, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23283, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if
(!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23283, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpyToArray", 0) < (0)) __PYX_ERR(0, 23283, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 6; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpyToArray", 1, 6, 6, i); __PYX_ERR(0, 23283, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 6)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23283, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23283, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23283, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23283, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23283, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23283, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_wOffset = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_wOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23284, __pyx_L3_error) __pyx_v_hOffset = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_hOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23284, __pyx_L3_error) __pyx_v_src = values[3]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23284,
__pyx_L3_error) __pyx_v_kind = values[5]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpyToArray", 1, 6, 6, __pyx_nargs); __PYX_ERR(0, 23283, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyToArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 23284, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_286cudaMemcpyToArray(__pyx_self, __pyx_v_dst, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_src, __pyx_v_count, __pyx_v_kind); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation: converts `dst` to a cudaArray_t handle (None -> 0; a cudaArray_t
   instance -> int(dst); anything else -> int(cudaArray_t(dst))), wraps `src` in
   _HelperInputVoidPtr and reads its .cptr, reads kind.value as cudaMemcpyKind,
   calls cyruntime.cudaMemcpyToArray with the GIL released (Py_UNBLOCK_THREADS /
   Py_BLOCK_THREADS around the call), and returns the 1-tuple
   (_dict_cudaError_t[err],). Error paths funnel through the goto labels, which
   release all temporaries before returning NULL. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_286cudaMemcpyToArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t __pyx_v_wOffset, size_t __pyx_v_hOffset, PyObject *__pyx_v_src, size_t __pyx_v_count, PyObject *__pyx_v_kind) { cudaArray_t __pyx_v_cydst; PyObject *__pyx_v_pdst = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; enum cudaMemcpyKind __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpyToArray", 0); /* "cuda/bindings/runtime.pyx":23325 * """ * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_dst == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23326 * cdef cyruntime.cudaArray_t cydst * if dst is None: * pdst = 0 # <<<<<<<<<<<<<< * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pdst = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23325 * """ * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23327 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_dst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23328 * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) # <<<<<<<<<<<<<< * else: * pdst = int(cudaArray_t(dst)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_dst); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23328, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_pdst = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":23327 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23330 * pdst = int(dst) * else: * pdst = int(cudaArray_t(dst)) # <<<<<<<<<<<<<< * cydst = pdst * cysrc = _HelperInputVoidPtr(src) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23330, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23330, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_pdst = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23331 * else: * pdst = int(cudaArray_t(dst)) * cydst = pdst # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pdst); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23331, __pyx_L1_error) __pyx_v_cydst = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":23332 * pdst = int(cudaArray_t(dst)) * cydst = pdst * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_src}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23332, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":23333 * cydst = pdst * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23333, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23333, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":23334 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpyToArray(cydst, wOffset, hOffset, cysrc_ptr, count, cykind) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23334, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_4)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23334, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cykind = __pyx_t_7; /* "cuda/bindings/runtime.pyx":23335 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyToArray(cydst, wOffset, hOffset, cysrc_ptr, count, cykind) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember();
/*try:*/ { /* "cuda/bindings/runtime.pyx":23336 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpyToArray(cydst, wOffset, hOffset, cysrc_ptr, count, cykind) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpyToArray(__pyx_v_cydst, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_cysrc_ptr, __pyx_v_count, __pyx_v_cykind); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23336, __pyx_L5_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":23335 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyToArray(cydst, wOffset, hOffset, cysrc_ptr, count, cykind) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":23337 * with nogil: * err = cyruntime.cudaMemcpyToArray(cydst, wOffset, hOffset, cysrc_ptr, count, cykind) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23337, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23337, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23337, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23337, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 23337, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23283 * return (_dict_cudaError_t[err], [obj.pyObj() for obj in pylist]) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyToArray(dst, size_t wOffset, size_t hOffset, src, size_t count, kind not None : cudaMemcpyKind): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyToArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pdst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23339 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyFromArray(dst, src, size_t wOffset, size_t hOffset, size_t count, kind not None : cudaMemcpyKind): * """ Copies data between host and device.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_289cudaMemcpyFromArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_288cudaMemcpyFromArray, "cudaMemcpyFromArray(dst, src, size_t wOffset, size_t hOffset, size_t count, kind: cudaMemcpyKind)\n\nCopies data between host and device.\n\n[Deprecated]\n\nCopies `count` bytes from the CUDA array `src` starting at `hOffset`\nrows and `wOffset` bytes from the upper left corner to the memory area\npointed to by `dst`, where `kind` specifies the direction of the copy,\nand must be one of :py:obj:`~.cudaMemcpyHostToHost`,\n:py:obj:`~.cudaMemcpyHostToDevice`, :py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. 
However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing.\n\nParameters\n----------\ndst : Any\n Destination memory address\nsrc : :py:obj:`~.cudaArray_const_t`\n Source memory address\nwOffset : size_t\n Source starting X offset (columns in bytes)\nhOffset : size_t\n Source starting Y offset (rows)\ncount : size_t\n Size in bytes to copy\nkind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpyToArray`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpyArrayToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpyToArrayAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpyFromArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyAtoH`, :py:obj:`~.cuMemcpyAtoD`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_289cudaMemcpyFromArray = {"cudaMemcpyFromArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_289cudaMemcpyFromArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_288cudaMemcpyFromArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_289cudaMemcpyFromArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; PyObject *__pyx_v_src = 0; size_t __pyx_v_wOffset; size_t __pyx_v_hOffset; size_t __pyx_v_count; PyObject *__pyx_v_kind = 0; #if !CYTHON_METH_FASTCALL 
/* NOTE(review): Cython-generated binding for runtime.pyx:23339 `cudaMemcpyFromArray`
   (deprecated CUDA runtime API). Machine-generated -- edit the .pyx and regenerate.
   Wrapper unpacks 6 positional/keyword arguments (dst, src, wOffset, hOffset,
   count, kind), converts the size_t arguments, rejects kind=None with a TypeError,
   and dispatches to the _pf_288 implementation; all error paths release the held
   argument references before returning NULL. */
CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[6] = {0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpyFromArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_wOffset,&__pyx_mstate_global->__pyx_n_u_hOffset,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_kind_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23339, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23339, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23339, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23339, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23339, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23339, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if
(!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23339, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpyFromArray", 0) < (0)) __PYX_ERR(0, 23339, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 6; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpyFromArray", 1, 6, 6, i); __PYX_ERR(0, 23339, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 6)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23339, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23339, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23339, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23339, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23339, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23339, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_src = values[1]; __pyx_v_wOffset = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_wOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23340, __pyx_L3_error) __pyx_v_hOffset = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_hOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23340, __pyx_L3_error) __pyx_v_count = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23340,
__pyx_L3_error) __pyx_v_kind = values[5]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpyFromArray", 1, 6, 6, __pyx_nargs); __PYX_ERR(0, 23339, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyFromArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 23340, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_288cudaMemcpyFromArray(__pyx_self, __pyx_v_dst, __pyx_v_src, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_count, __pyx_v_kind); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_288cudaMemcpyFromArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src, size_t __pyx_v_wOffset, size_t __pyx_v_hOffset, size_t __pyx_v_count, PyObject *__pyx_v_kind) { cudaArray_const_t __pyx_v_cysrc; PyObject *__pyx_v_psrc = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject
/* NOTE(review): Cython-GENERATED code (see the inline "cuda/bindings/runtime.pyx":NNNN
 * source comments below).  Do not hand-edit the logic here; fix the .pyx and
 * regenerate.  The lines below are the interior of
 * __pyx_pf_4cuda_8bindings_7runtime_288cudaMemcpyFromArray, whose signature opens
 * on the preceding line.  Visible behavior: coerces `src` to a raw handle
 * (None -> 0, cudaArray_const_t instance -> int(src), anything else ->
 * int(cudaArray_const_t(src))) and casts it to cudaArray_const_t; wraps `dst`
 * in _HelperInputVoidPtr and reads its .cptr as a void*; reads kind.value as a
 * cudaMemcpyKind enum; calls cyruntime.cudaMemcpyFromArray with the GIL
 * released (Py_UNBLOCK_THREADS ... Py_BLOCK_THREADS); returns the 1-tuple
 * (_dict_cudaError_t[err],).  Error paths funnel through __pyx_L1_error with
 * temp-ref cleanup. */
*__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; enum cudaMemcpyKind __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpyFromArray", 0); /* "cuda/bindings/runtime.pyx":23381 * """ * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ __pyx_t_1 = (__pyx_v_src == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23382 * cdef cyruntime.cudaArray_const_t cysrc * if src is None: * psrc = 0 # <<<<<<<<<<<<<< * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_psrc = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23381 * """ * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23383 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_src, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23384 * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) # <<<<<<<<<<<<<< * else: * psrc = int(cudaArray_const_t(src)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_src); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23384, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_psrc = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":23383 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23386 * psrc = int(src) * else: * psrc = int(cudaArray_const_t(src)) # <<<<<<<<<<<<<< * cysrc = psrc * cydst = 
_HelperInputVoidPtr(dst) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_src}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23386, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23386, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_psrc = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23387 * else: * psrc = int(cudaArray_const_t(src)) * cysrc = psrc # <<<<<<<<<<<<<< * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_psrc); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23387, __pyx_L1_error) __pyx_v_cysrc = ((cudaArray_const_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":23388 * psrc = int(cudaArray_const_t(src)) * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_dst}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) 
| (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23388, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":23389 * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23389, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23389, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":23390 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpyFromArray(cydst_ptr, cysrc, wOffset, hOffset, count, cykind) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23390, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_4)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23390, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cykind = __pyx_t_7; /* "cuda/bindings/runtime.pyx":23391 * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyFromArray(cydst_ptr, cysrc, wOffset, hOffset, count, cykind) * return 
(_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23392 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpyFromArray(cydst_ptr, cysrc, wOffset, hOffset, count, cykind) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpyFromArray(__pyx_v_cydst_ptr, __pyx_v_cysrc, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_count, __pyx_v_cykind); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23392, __pyx_L5_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":23391 * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyFromArray(cydst_ptr, cysrc, wOffset, hOffset, count, cykind) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":23393 * with nogil: * err = cyruntime.cudaMemcpyFromArray(cydst_ptr, cysrc, wOffset, hOffset, count, cykind) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = 
PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 23393, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23339 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyFromArray(dst, src, size_t wOffset, size_t hOffset, size_t count, kind not None : cudaMemcpyKind): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyFromArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_psrc); __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23395 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, size_t wOffsetSrc, size_t hOffsetSrc, size_t count, kind not None : cudaMemcpyKind): * """ Copies data between host and device. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_291cudaMemcpyArrayToArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_290cudaMemcpyArrayToArray, "cudaMemcpyArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, size_t wOffsetSrc, size_t hOffsetSrc, size_t count, kind: cudaMemcpyKind)\n\nCopies data between host and device.\n\n[Deprecated]\n\nCopies `count` bytes from the CUDA array `src` starting at `hOffsetSrc`\nrows and `wOffsetSrc` bytes from the upper left corner to the CUDA\narray `dst` starting at `hOffsetDst` rows and `wOffsetDst` bytes from\nthe upper left corner, where `kind` specifies the direction of the\ncopy, and must be one of :py:obj:`~.cudaMemcpyHostToHost`,\n:py:obj:`~.cudaMemcpyHostToDevice`, :py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. 
However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing.\n\nParameters\n----------\ndst : :py:obj:`~.cudaArray_t`\n  Destination memory address\nwOffsetDst : size_t\n  Destination starting X offset (columns in bytes)\nhOffsetDst : size_t\n  Destination starting Y offset (rows)\nsrc : :py:obj:`~.cudaArray_const_t`\n  Source memory address\nwOffsetSrc : size_t\n  Source starting X offset (columns in bytes)\nhOffsetSrc : size_t\n  Source starting Y offset (rows)\ncount : size_t\n  Size in bytes to copy\nkind : :py:obj:`~.cudaMemcpyKind`\n  Type of transfer\n\nReturns\n-------\ncudaError_t\n  :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpyToArray`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpyFromArray`, :py:obj:`~.cudaMemcpy2DFromArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpyToArrayAsync`, :py:obj:""`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpyFromArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyAtoA`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_291cudaMemcpyArrayToArray = {"cudaMemcpyArrayToArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_291cudaMemcpyArrayToArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_290cudaMemcpyArrayToArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_291cudaMemcpyArrayToArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t 
/* NOTE(review): Cython-GENERATED argument-parsing wrapper for
 * cudaMemcpyArrayToArray(dst, wOffsetDst, hOffsetDst, src, wOffsetSrc,
 * hOffsetSrc, count, kind).  The preceding lines carry its prototype, the
 * PyDoc_STRVAR docstring, and the PyMethodDef entry.  The code below unpacks
 * exactly 8 positional/keyword arguments into values[], converts the four
 * offsets and count with __Pyx_PyLong_As_size_t, rejects kind=None with a
 * TypeError, and dispatches to __pyx_pf_...290cudaMemcpyArrayToArray.  All
 * error paths Py_XDECREF the values[] slots.  Edit the .pyx source and
 * regenerate rather than hand-editing this file. */
__pyx_v_wOffsetDst; size_t __pyx_v_hOffsetDst; PyObject *__pyx_v_src = 0; size_t __pyx_v_wOffsetSrc; size_t __pyx_v_hOffsetSrc; size_t __pyx_v_count; PyObject *__pyx_v_kind = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[8] = {0,0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpyArrayToArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_wOffsetDst,&__pyx_mstate_global->__pyx_n_u_hOffsetDst,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_wOffsetSrc,&__pyx_mstate_global->__pyx_n_u_hOffsetSrc,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_kind_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23395, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 8: values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 23395, __pyx_L3_error) CYTHON_FALLTHROUGH; case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 23395, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23395, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23395, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23395, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23395, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23395, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23395, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpyArrayToArray", 0) < (0)) __PYX_ERR(0, 23395, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 8; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpyArrayToArray", 1, 8, 8, i); __PYX_ERR(0, 23395, __pyx_L3_error) } } } else if 
(unlikely(__pyx_nargs != 8)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23395, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23395, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23395, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23395, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23395, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23395, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 23395, __pyx_L3_error) values[7] = __Pyx_ArgRef_FASTCALL(__pyx_args, 7); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[7])) __PYX_ERR(0, 23395, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_wOffsetDst = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_wOffsetDst == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23396, __pyx_L3_error) __pyx_v_hOffsetDst = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_hOffsetDst == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23396, __pyx_L3_error) __pyx_v_src = values[3]; __pyx_v_wOffsetSrc = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_wOffsetSrc == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23396, __pyx_L3_error) __pyx_v_hOffsetSrc = __Pyx_PyLong_As_size_t(values[5]); if (unlikely((__pyx_v_hOffsetSrc == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23396, __pyx_L3_error) __pyx_v_count = __Pyx_PyLong_As_size_t(values[6]); if (unlikely((__pyx_v_count == (size_t)-1) 
&& PyErr_Occurred())) __PYX_ERR(0, 23396, __pyx_L3_error) __pyx_v_kind = values[7]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpyArrayToArray", 1, 8, 8, __pyx_nargs); __PYX_ERR(0, 23395, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyArrayToArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 23396, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_290cudaMemcpyArrayToArray(__pyx_self, __pyx_v_dst, __pyx_v_wOffsetDst, __pyx_v_hOffsetDst, __pyx_v_src, __pyx_v_wOffsetSrc, __pyx_v_hOffsetSrc, __pyx_v_count, __pyx_v_kind); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_290cudaMemcpyArrayToArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t __pyx_v_wOffsetDst, size_t __pyx_v_hOffsetDst, PyObject *__pyx_v_src, size_t __pyx_v_wOffsetSrc, size_t __pyx_v_hOffsetSrc, size_t __pyx_v_count, PyObject *__pyx_v_kind) { cudaArray_const_t __pyx_v_cysrc; PyObject *__pyx_v_psrc = NULL; cudaArray_t __pyx_v_cydst; PyObject *__pyx_v_pdst = NULL; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; 
/* NOTE(review): Cython-GENERATED interior of
 * __pyx_pf_4cuda_8bindings_7runtime_290cudaMemcpyArrayToArray (its signature
 * opens on the preceding line).  Mirrors the cudaMemcpyFromArray
 * implementation above: coerces `src` to cudaArray_const_t and `dst` to
 * cudaArray_t via int() round-trips (None -> 0, matching instance -> int(x),
 * otherwise int(Type(x))), reads kind.value as a cudaMemcpyKind enum, calls
 * cyruntime.cudaMemcpyArrayToArray with the GIL released, and returns the
 * 1-tuple (_dict_cudaError_t[err],).  Edit cuda/bindings/runtime.pyx and
 * regenerate; do not hand-edit the refcount/goto sequencing here. */
PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; enum cudaMemcpyKind __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpyArrayToArray", 0); /* "cuda/bindings/runtime.pyx":23442 * """ * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ __pyx_t_1 = (__pyx_v_src == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23443 * cdef cyruntime.cudaArray_const_t cysrc * if src is None: * psrc = 0 # <<<<<<<<<<<<<< * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_psrc = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23442 * """ * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23444 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_src, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23445 * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) # <<<<<<<<<<<<<< * else: * psrc = int(cudaArray_const_t(src)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_src); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23445, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_psrc = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":23444 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ goto __pyx_L3; } /* 
"cuda/bindings/runtime.pyx":23447 * psrc = int(src) * else: * psrc = int(cudaArray_const_t(src)) # <<<<<<<<<<<<<< * cysrc = psrc * cdef cyruntime.cudaArray_t cydst */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_src}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23447, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_psrc = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23448 * else: * psrc = int(cudaArray_const_t(src)) * cysrc = psrc # <<<<<<<<<<<<<< * cdef cyruntime.cudaArray_t cydst * if dst is None: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_psrc); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23448, __pyx_L1_error) __pyx_v_cysrc = ((cudaArray_const_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":23450 * cysrc = psrc * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_dst == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23451 * cdef cyruntime.cudaArray_t cydst * if dst is None: * pdst = 0 # <<<<<<<<<<<<<< * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pdst = 
__pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23450 * cysrc = psrc * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":23452 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_dst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23453 * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) # <<<<<<<<<<<<<< * else: * pdst = int(cudaArray_t(dst)) */ __pyx_t_4 = __Pyx_PyNumber_Int(__pyx_v_dst); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23453, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_v_pdst = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":23452 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":23455 * pdst = int(dst) * else: * pdst = int(cudaArray_t(dst)) # <<<<<<<<<<<<<< * cydst = pdst * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ /*else*/ { __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, __pyx_v_dst}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23455, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_t_3 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_4)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23455, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF((PyObject 
*)__pyx_t_4); __pyx_t_4 = 0; __pyx_v_pdst = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":23456 * else: * pdst = int(cudaArray_t(dst)) * cydst = pdst # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pdst); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23456, __pyx_L1_error) __pyx_v_cydst = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":23457 * pdst = int(cudaArray_t(dst)) * cydst = pdst * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpyArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, count, cykind) */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23457, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_3)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23457, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_cykind = __pyx_t_7; /* "cuda/bindings/runtime.pyx":23458 * cydst = pdst * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, count, cykind) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23459 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpyArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, count, cykind) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpyArrayToArray(__pyx_v_cydst, __pyx_v_wOffsetDst, 
__pyx_v_hOffsetDst, __pyx_v_cysrc, __pyx_v_wOffsetSrc, __pyx_v_hOffsetSrc, __pyx_v_count, __pyx_v_cykind); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23459, __pyx_L6_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":23458 * cydst = pdst * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, count, cykind) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L7; } __pyx_L6_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L7:; } } /* "cuda/bindings/runtime.pyx":23460 * with nogil: * err = cyruntime.cudaMemcpyArrayToArray(cydst, wOffsetDst, hOffsetDst, cysrc, wOffsetSrc, hOffsetSrc, count, cykind) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23460, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23460, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 23460, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23460, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 23460, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23395 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # 
<<<<<<<<<<<<<< * def cudaMemcpyArrayToArray(dst, size_t wOffsetDst, size_t hOffsetDst, src, size_t wOffsetSrc, size_t hOffsetSrc, size_t count, kind not None : cudaMemcpyKind): * """ Copies data between host and device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyArrayToArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_psrc); __Pyx_XDECREF(__pyx_v_pdst); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23462 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t count, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_293cudaMemcpyToArrayAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_292cudaMemcpyToArrayAsync, "cudaMemcpyToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t count, kind: cudaMemcpyKind, stream)\n\nCopies data between host and device.\n\n[Deprecated]\n\nCopies `count` bytes from the memory area pointed to by `src` to the\nCUDA array `dst` starting at `hOffset` rows and `wOffset` bytes from\nthe upper left corner, where `kind` specifies the direction of the\ncopy, and must be one of :py:obj:`~.cudaMemcpyHostToHost`,\n:py:obj:`~.cudaMemcpyHostToDevice`, :py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. 
However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing.\n\n:py:obj:`~.cudaMemcpyToArrayAsync()` is asynchronous with respect to\nthe host, so the call may return before the copy is complete. The copy\ncan optionally be associated to a stream by passing a non-zero `stream`\nargument. If `kind` is :py:obj:`~.cudaMemcpyHostToDevice` or\n:py:obj:`~.cudaMemcpyDeviceToHost` and `stream` is non-zero, the copy\nmay overlap with operations in other streams.\n\nParameters\n----------\ndst : :py:obj:`~.cudaArray_t`\n Destination memory address\nwOffset : size_t\n Destination starting X offset (columns in bytes)\nhOffset : size_t\n Destination starting Y offset (rows)\nsrc : Any\n Source memory address\ncount : size_t\n Size in bytes to copy\nkind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream identifier\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpyToArray`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpyFromArray`, :py:obj:`~.cudaMemcpy2DFromArray""`, :py:obj:`~.cudaMemcpyArrayToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpyFromArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyHtoAAsync`, :py:obj:`~.cuMemcpy2DAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_293cudaMemcpyToArrayAsync = {"cudaMemcpyToArrayAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_293cudaMemcpyToArrayAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, 
__pyx_doc_4cuda_8bindings_7runtime_292cudaMemcpyToArrayAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_293cudaMemcpyToArrayAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; size_t __pyx_v_wOffset; size_t __pyx_v_hOffset; PyObject *__pyx_v_src = 0; size_t __pyx_v_count; PyObject *__pyx_v_kind = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[7] = {0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpyToArrayAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_wOffset,&__pyx_mstate_global->__pyx_n_u_hOffset,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_kind_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23462, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 23462, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23462, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23462, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23462, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23462, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23462, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23462, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpyToArrayAsync", 0) < (0)) __PYX_ERR(0, 23462, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 7; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpyToArrayAsync", 1, 7, 7, i); __PYX_ERR(0, 23462, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 7)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && 
unlikely(!values[0])) __PYX_ERR(0, 23462, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23462, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23462, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23462, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23462, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23462, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 23462, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_wOffset = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_wOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23463, __pyx_L3_error) __pyx_v_hOffset = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_hOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23463, __pyx_L3_error) __pyx_v_src = values[3]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23463, __pyx_L3_error) __pyx_v_kind = values[5]; __pyx_v_stream = values[6]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpyToArrayAsync", 1, 7, 7, __pyx_nargs); __PYX_ERR(0, 23462, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyToArrayAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); 
/* Cython-generated (do not hand-edit; regenerate from runtime.pyx instead).
 * Tail of the cudaMemcpyToArrayAsync wrapper: rejects kind=None with a
 * TypeError, then dispatches to the implementation function. The
 * implementation (__pyx_pf_..._292...) coerces stream/dst Python objects
 * to integer handles, wraps `src` in _HelperInputVoidPtr to obtain a raw
 * void*, reads kind.value as a cudaMemcpyKind enum, calls the underlying
 * cyruntime.cudaMemcpyToArrayAsync with the GIL released, and returns a
 * 1-tuple (_dict_cudaError_t[err],). */
return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 23463, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_292cudaMemcpyToArrayAsync(__pyx_self, __pyx_v_dst, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_src, __pyx_v_count, __pyx_v_kind, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_292cudaMemcpyToArrayAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, size_t __pyx_v_wOffset, size_t __pyx_v_hOffset, PyObject *__pyx_v_src, size_t __pyx_v_count, PyObject *__pyx_v_kind, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaArray_t __pyx_v_cydst; PyObject *__pyx_v_pdst = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaMemcpyKind __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpyToArrayAsync", 0); /* "cuda/bindings/runtime.pyx":23513 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * 
pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23514 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23513 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23515 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23516 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":23515 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23518 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaArray_t cydst */ 
/* Fallback coercion: any other `stream` object is passed through
 * cudaStream_t(...) then int(...) to obtain an integer handle. */
/*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23518, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23518, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23519 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaArray_t cydst * if dst is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23519, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23521 * cystream = pstream * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): */ __pyx_t_1 = (__pyx_v_dst == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23522 * cdef cyruntime.cudaArray_t cydst * if dst is None: * pdst = 0 # <<<<<<<<<<<<<< * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pdst = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23521 * cystream = pstream * cdef cyruntime.cudaArray_t cydst * if dst is None: # <<<<<<<<<<<<<< * pdst = 0 * elif 
isinstance(dst, (cudaArray_t,)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":23523 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_dst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23524 * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): * pdst = int(dst) # <<<<<<<<<<<<<< * else: * pdst = int(cudaArray_t(dst)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_dst); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23524, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pdst = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":23523 * if dst is None: * pdst = 0 * elif isinstance(dst, (cudaArray_t,)): # <<<<<<<<<<<<<< * pdst = int(dst) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":23526 * pdst = int(dst) * else: * pdst = int(cudaArray_t(dst)) # <<<<<<<<<<<<<< * cydst = pdst * cysrc = _HelperInputVoidPtr(src) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23526, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23526, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pdst = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":23527 * else: * pdst = int(cudaArray_t(dst)) * cydst = pdst # 
<<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pdst); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23527, __pyx_L1_error) __pyx_v_cydst = ((cudaArray_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23528 * pdst = int(cudaArray_t(dst)) * cydst = pdst * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_src}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23528, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":23529 * cydst = pdst * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23529, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23529, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cysrc_ptr = ((void 
*)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23530 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpyToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, count, cykind, cystream) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23530, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_4)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23530, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cykind = __pyx_t_8; /* "cuda/bindings/runtime.pyx":23531 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, count, cykind, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23532 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpyToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, count, cykind, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpyToArrayAsync(__pyx_v_cydst, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_cysrc_ptr, __pyx_v_count, __pyx_v_cykind, __pyx_v_cystream); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23532, __pyx_L8_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":23531 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, 
count, cykind, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L9; } __pyx_L8_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L9:; } } /* "cuda/bindings/runtime.pyx":23533 * with nogil: * err = cyruntime.cudaMemcpyToArrayAsync(cydst, wOffset, hOffset, cysrc_ptr, count, cykind, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23533, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23533, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23533, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23533, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 23533, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23462 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyToArrayAsync(dst, size_t wOffset, size_t hOffset, src, size_t count, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyToArrayAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_pdst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23535 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyFromArrayAsync(dst, src, size_t wOffset, size_t hOffset, size_t count, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_295cudaMemcpyFromArrayAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_294cudaMemcpyFromArrayAsync, "cudaMemcpyFromArrayAsync(dst, src, size_t wOffset, size_t hOffset, size_t count, kind: cudaMemcpyKind, stream)\n\nCopies data between host and device.\n\n[Deprecated]\n\nCopies `count` bytes from the CUDA array `src` starting at `hOffset`\nrows and `wOffset` bytes from the upper left corner to the memory area\npointed to by `dst`, where `kind` specifies the direction of the copy,\nand must be one of :py:obj:`~.cudaMemcpyHostToHost`,\n:py:obj:`~.cudaMemcpyHostToDevice`, :py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. 
However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing.\n\n:py:obj:`~.cudaMemcpyFromArrayAsync()` is asynchronous with respect to\nthe host, so the call may return before the copy is complete. The copy\ncan optionally be associated to a stream by passing a non-zero `stream`\nargument. If `kind` is :py:obj:`~.cudaMemcpyHostToDevice` or\n:py:obj:`~.cudaMemcpyDeviceToHost` and `stream` is non-zero, the copy\nmay overlap with operations in other streams.\n\nParameters\n----------\ndst : Any\n    Destination memory address\nsrc : :py:obj:`~.cudaArray_const_t`\n    Source memory address\nwOffset : size_t\n    Source starting X offset (columns in bytes)\nhOffset : size_t\n    Source starting Y offset (rows)\ncount : size_t\n    Size in bytes to copy\nkind : :py:obj:`~.cudaMemcpyKind`\n    Type of transfer\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n    Stream identifier\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidMemcpyDirection`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaMemcpy2D`, :py:obj:`~.cudaMemcpyToArray`, :py:obj:`~.cudaMemcpy2DToArray`, :py:obj:`~.cudaMemcpyFromArray`, :py:obj:`~.cudaMemcpy2DFromArray""`, :py:obj:`~.cudaMemcpyArrayToArray`, :py:obj:`~.cudaMemcpy2DArrayToArray`, :py:obj:`~.cudaMemcpyToSymbol`, :py:obj:`~.cudaMemcpyFromSymbol`, :py:obj:`~.cudaMemcpyAsync`, :py:obj:`~.cudaMemcpy2DAsync`, :py:obj:`~.cudaMemcpyToArrayAsync`, :py:obj:`~.cudaMemcpy2DToArrayAsync`, :py:obj:`~.cudaMemcpy2DFromArrayAsync`, :py:obj:`~.cudaMemcpyToSymbolAsync`, :py:obj:`~.cudaMemcpyFromSymbolAsync`, :py:obj:`~.cuMemcpyAtoHAsync`, :py:obj:`~.cuMemcpy2DAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_295cudaMemcpyFromArrayAsync = {"cudaMemcpyFromArrayAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_295cudaMemcpyFromArrayAsync, 
/* Cython-generated (do not hand-edit; regenerate from runtime.pyx instead).
 * Method-table entry and Python wrapper for cudaMemcpyFromArrayAsync: the
 * wrapper unpacks exactly 7 positional/keyword arguments
 * (dst, src, wOffset, hOffset, count, kind, stream), converting wOffset,
 * hOffset and count to size_t; parse/conversion failures jump to
 * __pyx_L3_error, which releases all held argument refs before returning
 * NULL. A None `kind` is rejected with a TypeError before dispatching to
 * the implementation function. */
__Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_294cudaMemcpyFromArrayAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_295cudaMemcpyFromArrayAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_dst = 0; PyObject *__pyx_v_src = 0; size_t __pyx_v_wOffset; size_t __pyx_v_hOffset; size_t __pyx_v_count; PyObject *__pyx_v_kind = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[7] = {0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemcpyFromArrayAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_wOffset,&__pyx_mstate_global->__pyx_n_u_hOffset,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_kind_2,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23535, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 23535, __pyx_L3_error) CYTHON_FALLTHROUGH; case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23535, __pyx_L3_error) CYTHON_FALLTHROUGH; case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23535, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23535, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23535, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23535, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23535, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemcpyFromArrayAsync", 0) < (0)) __PYX_ERR(0, 23535, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 7; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemcpyFromArrayAsync", 1, 7, 7, i); __PYX_ERR(0, 23535, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 7)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && 
unlikely(!values[0])) __PYX_ERR(0, 23535, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23535, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23535, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 23535, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 23535, __pyx_L3_error) values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 23535, __pyx_L3_error) values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 23535, __pyx_L3_error) } __pyx_v_dst = values[0]; __pyx_v_src = values[1]; __pyx_v_wOffset = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_wOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23536, __pyx_L3_error) __pyx_v_hOffset = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_hOffset == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23536, __pyx_L3_error) __pyx_v_count = __Pyx_PyLong_As_size_t(values[4]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23536, __pyx_L3_error) __pyx_v_kind = values[5]; __pyx_v_stream = values[6]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemcpyFromArrayAsync", 1, 7, 7, __pyx_nargs); __PYX_ERR(0, 23535, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyFromArrayAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); 
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 23536, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_294cudaMemcpyFromArrayAsync(__pyx_self, __pyx_v_dst, __pyx_v_src, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_count, __pyx_v_kind, __pyx_v_stream); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_294cudaMemcpyFromArrayAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src, size_t __pyx_v_wOffset, size_t __pyx_v_hOffset, size_t __pyx_v_count, PyObject *__pyx_v_kind, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaArray_const_t __pyx_v_cysrc; PyObject *__pyx_v_psrc = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaMemcpyKind __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemcpyFromArrayAsync", 0); /* "cuda/bindings/runtime.pyx":23586 * """ * cdef cyruntime.cudaStream_t cystream * 
if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23587 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23586 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23588 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23589 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23589, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":23588 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23591 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * 
cdef cyruntime.cudaArray_const_t cysrc */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23591, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23592 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaArray_const_t cysrc * if src is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23592, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23594 * cystream = pstream * cdef cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ __pyx_t_1 = (__pyx_v_src == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23595 * cdef cyruntime.cudaArray_const_t cysrc * if src is None: * psrc = 0 # <<<<<<<<<<<<<< * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_psrc = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23594 * cystream = pstream * cdef 
cyruntime.cudaArray_const_t cysrc * if src is None: # <<<<<<<<<<<<<< * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":23596 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_src, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23597 * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): * psrc = int(src) # <<<<<<<<<<<<<< * else: * psrc = int(cudaArray_const_t(src)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_src); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23597, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_psrc = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":23596 * if src is None: * psrc = 0 * elif isinstance(src, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * psrc = int(src) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":23599 * psrc = int(src) * else: * psrc = int(cudaArray_const_t(src)) # <<<<<<<<<<<<<< * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_src}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23599, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_psrc = 
((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":23600 * else: * psrc = int(cudaArray_const_t(src)) * cysrc = psrc # <<<<<<<<<<<<<< * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_psrc); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23600, __pyx_L1_error) __pyx_v_cysrc = ((cudaArray_const_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23601 * psrc = int(cudaArray_const_t(src)) * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_dst}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23601, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":23602 * cysrc = psrc * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23602, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_4); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && 
PyErr_Occurred())) __PYX_ERR(0, 23602, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23603 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemcpyFromArrayAsync(cydst_ptr, cysrc, wOffset, hOffset, count, cykind, cystream) */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23603, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_8 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_4)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23603, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_v_cykind = __pyx_t_8; /* "cuda/bindings/runtime.pyx":23604 * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyFromArrayAsync(cydst_ptr, cysrc, wOffset, hOffset, count, cykind, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23605 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaMemcpyFromArrayAsync(cydst_ptr, cysrc, wOffset, hOffset, count, cykind, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemcpyFromArrayAsync(__pyx_v_cydst_ptr, __pyx_v_cysrc, __pyx_v_wOffset, __pyx_v_hOffset, __pyx_v_count, __pyx_v_cykind, __pyx_v_cystream); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23605, __pyx_L8_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":23604 * cdef void* cydst_ptr = cydst.cptr * cdef cyruntime.cudaMemcpyKind 
cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemcpyFromArrayAsync(cydst_ptr, cysrc, wOffset, hOffset, count, cykind, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L9; } __pyx_L8_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L9:; } } /* "cuda/bindings/runtime.pyx":23606 * with nogil: * err = cyruntime.cudaMemcpyFromArrayAsync(cydst_ptr, cysrc, wOffset, hOffset, count, cykind, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23606, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23606, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23606, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23606, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 23606, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23535 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemcpyFromArrayAsync(dst, src, size_t wOffset, size_t hOffset, size_t count, kind not None : cudaMemcpyKind, stream): * """ Copies data between host and device. 
*/ /* function exit code */ /* NOTE(review): shared exit path for the implementation above — on error every live temporary is decref'd and a traceback frame is added before returning NULL. */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemcpyFromArrayAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_psrc); __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23608 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocAsync(size_t size, hStream): * """ Allocates memory with stream ordered semantics. */ /* Python wrapper */ /* NOTE(review): METH_FASTCALL|METH_KEYWORDS wrapper for cudaMallocAsync(size, hStream): parses exactly two positional/keyword arguments, converts `size` via __Pyx_PyLong_As_size_t, then delegates to the pf implementation. Generated code — do not hand-edit. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_297cudaMallocAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_296cudaMallocAsync, "cudaMallocAsync(size_t size, hStream)\n\nAllocates memory with stream ordered semantics.\n\nInserts an allocation operation into `hStream`. A pointer to the\nallocated memory is returned immediately in *dptr. The allocation must\nnot be accessed until the the allocation operation completes. The\nallocation comes from the memory pool associated with the stream's\ndevice.\n\nParameters\n----------\nsize : size_t\n    Number of bytes to allocate\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n    The stream establishing the stream ordering contract and the memory\n    pool to allocate from\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorNotSupported`, :py:obj:`~.cudaErrorOutOfMemory`,\ndevPtr : Any\n    Returned device pointer\n\nSee Also\n--------\n:py:obj:`~.cuMemAllocAsync`, cudaMallocAsync (C++ API), :py:obj:`~.cudaMallocFromPoolAsync`, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaDeviceSetMemPool`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceGetMemPool`, :py:obj:`~.cudaMemPoolSetAccess`, :py:obj:`~.cudaMemPoolSetAttribute`, :py:obj:`~.cudaMemPoolGetAttribute`\n\nNotes\n-----\nThe default memory pool of a device contains device memory from that device.\n\nBasic stream ordering allows future work submitted into the same stream to use the allocation. Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation operation completes before work submitted in a separate stream runs.\n\nDuring stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_297cudaMallocAsync = {"cudaMallocAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_297cudaMallocAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_296cudaMallocAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_297cudaMallocAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { size_t __pyx_v_size; PyObject *__pyx_v_hStream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMallocAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size_2,&__pyx_mstate_global->__pyx_n_u_hStream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23608, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23608, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23608, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMallocAsync", 0) < (0)) __PYX_ERR(0, 23608, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMallocAsync", 1, 2, 2, i); __PYX_ERR(0, 23608, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23608, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23608, __pyx_L3_error) } __pyx_v_size = __Pyx_PyLong_As_size_t(values[0]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23609, __pyx_L3_error) __pyx_v_hStream = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMallocAsync", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23608, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); 
return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_296cudaMallocAsync(__pyx_self, __pyx_v_size, __pyx_v_hStream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* NOTE(review): cudaMallocAsync implementation. Per the quoted .pyx lines below: normalizes hStream (None -> 0, cudaStream_t/driver.CUstream -> int(hStream), otherwise int(cudaStream_t(hStream))) to a C stream handle, calls cyruntime.cudaMallocAsync(&devPtr, size, cyhStream) with the GIL released, and returns (_dict_cudaError_t[err], None) on failure or (_dict_cudaError_t[err], devPtr) on success. Generated code — do not hand-edit. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_296cudaMallocAsync(CYTHON_UNUSED PyObject *__pyx_self, size_t __pyx_v_size, PyObject *__pyx_v_hStream) { cudaStream_t __pyx_v_cyhStream; PyObject *__pyx_v_phStream = NULL; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_devPtr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMallocAsync", 0); /* "cuda/bindings/runtime.pyx":23646 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_hStream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23647 * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: * phStream = 0 # <<<<<<<<<<<<<< * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phStream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23646 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23648 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # 
<<<<<<<<<<<<<< * phStream = int(hStream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23649 * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) # <<<<<<<<<<<<<< * else: * phStream = int(cudaStream_t(hStream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hStream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23649, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phStream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":23648 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23651 * phStream = int(hStream) * else: * phStream = int(cudaStream_t(hStream)) # <<<<<<<<<<<<<< * cyhStream = phStream * cdef void_ptr devPtr = 0 */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hStream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23651, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
__Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phStream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23652 * else: * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream # <<<<<<<<<<<<<< * cdef void_ptr devPtr = 0 * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phStream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23652, __pyx_L1_error) __pyx_v_cyhStream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23653 * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream * cdef void_ptr devPtr = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMallocAsync(&devPtr, size, cyhStream) */ __pyx_v_devPtr = 0; /* "cuda/bindings/runtime.pyx":23654 * cyhStream = phStream * cdef void_ptr devPtr = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocAsync(&devPtr, size, cyhStream) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23655 * cdef void_ptr devPtr = 0 * with nogil: * err = cyruntime.cudaMallocAsync(&devPtr, size, cyhStream) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMallocAsync(((void **)(&__pyx_v_devPtr)), __pyx_v_size, __pyx_v_cyhStream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23655, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":23654 * cyhStream = phStream * cdef void_ptr devPtr = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocAsync(&devPtr, size, cyhStream) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); 
Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":23656 * with nogil: * err = cyruntime.cudaMallocAsync(&devPtr, size, cyhStream) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23657 * err = cyruntime.cudaMallocAsync(&devPtr, size, cyhStream) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], devPtr) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23657, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 23657, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 23657, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23656 * with nogil: * err = cyruntime.cudaMallocAsync(&devPtr, size, cyhStream) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], devPtr) */ } /* "cuda/bindings/runtime.pyx":23658 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return 
(_dict_cudaError_t[err], devPtr) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_devPtr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 23658, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 23658, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23608 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocAsync(size_t size, hStream): * """ Allocates memory with stream ordered semantics. 
*/ /* function exit code */ /* NOTE(review): exit path for cudaMallocAsync — decref temporaries, record traceback on error, release phStream. */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phStream); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23660 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFreeAsync(devPtr, hStream): * """ Frees memory with stream ordered semantics. */ /* Python wrapper */ /* NOTE(review): METH_FASTCALL|METH_KEYWORDS wrapper for cudaFreeAsync(devPtr, hStream): parses exactly two arguments (no conversion — both stay PyObject*) and delegates to the pf implementation. Generated code — do not hand-edit. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_299cudaFreeAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_298cudaFreeAsync, "cudaFreeAsync(devPtr, hStream)\n\nFrees memory with stream ordered semantics.\n\nInserts a free operation into `hStream`. The allocation must not be\naccessed after stream execution reaches the free. After this API\nreturns, accessing the memory from any subsequent work launched on the\nGPU or querying its pointer attributes results in undefined behavior.\n\nParameters\n----------\ndptr : Any\n    memory to free\nhStream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n    The stream establishing the stream ordering promise\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorNotSupported`\n\nSee Also\n--------\n:py:obj:`~.cuMemFreeAsync`, :py:obj:`~.cudaMallocAsync`\n\nNotes\n-----\nDuring stream capture, this function results in the creation of a free node and must therefore be passed the address of a graph allocation."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_299cudaFreeAsync = {"cudaFreeAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_299cudaFreeAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_298cudaFreeAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_299cudaFreeAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_devPtr = 0; PyObject *__pyx_v_hStream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaFreeAsync (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = 
{&__pyx_mstate_global->__pyx_n_u_devPtr_2,&__pyx_mstate_global->__pyx_n_u_hStream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23660, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23660, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23660, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaFreeAsync", 0) < (0)) __PYX_ERR(0, 23660, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaFreeAsync", 1, 2, 2, i); __PYX_ERR(0, 23660, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23660, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23660, __pyx_L3_error) } __pyx_v_devPtr = values[0]; __pyx_v_hStream = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaFreeAsync", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23660, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaFreeAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); 
__Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_298cudaFreeAsync(__pyx_self, __pyx_v_devPtr, __pyx_v_hStream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_298cudaFreeAsync(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_devPtr, PyObject *__pyx_v_hStream) { cudaStream_t __pyx_v_cyhStream; PyObject *__pyx_v_phStream = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydevPtr = NULL; void *__pyx_v_cydevPtr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaFreeAsync", 0); /* "cuda/bindings/runtime.pyx":23690 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_hStream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23691 * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: * phStream = 0 # <<<<<<<<<<<<<< * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phStream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23690 * """ * cdef cyruntime.cudaStream_t cyhStream * if hStream is None: # <<<<<<<<<<<<<< * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23692 * if
hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hStream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23693 * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): * phStream = int(hStream) # <<<<<<<<<<<<<< * else: * phStream = int(cudaStream_t(hStream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hStream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23693, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phStream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":23692 * if hStream is None: * phStream = 0 * elif isinstance(hStream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * phStream = int(hStream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23695 * phStream = int(hStream) * else: * phStream = int(cudaStream_t(hStream)) # <<<<<<<<<<<<<< * cyhStream = phStream * cydevPtr = _HelperInputVoidPtr(devPtr) */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hStream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23695, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject 
*)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23695, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phStream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23696 * else: * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream # <<<<<<<<<<<<<< * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phStream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23696, __pyx_L1_error) __pyx_v_cyhStream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23697 * phStream = int(cudaStream_t(hStream)) * cyhStream = phStream * cydevPtr = _HelperInputVoidPtr(devPtr) # <<<<<<<<<<<<<< * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_devPtr}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23697, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cydevPtr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":23698 * cyhStream = phStream * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaFreeAsync(cydevPtr_ptr, cyhStream) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydevPtr), 
__pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23698, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cydevPtr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23699 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFreeAsync(cydevPtr_ptr, cyhStream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23700 * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: * err = cyruntime.cudaFreeAsync(cydevPtr_ptr, cyhStream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaFreeAsync(__pyx_v_cydevPtr_ptr, __pyx_v_cyhStream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23700, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":23699 * cydevPtr = _HelperInputVoidPtr(devPtr) * cdef void* cydevPtr_ptr = cydevPtr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaFreeAsync(cydevPtr_ptr, cyhStream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":23701 * with nogil: * err = cyruntime.cudaFreeAsync(cydevPtr_ptr, cyhStream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 23701, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23701, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23701, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23701, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 23701, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23660 * return (_dict_cudaError_t[err], devPtr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaFreeAsync(devPtr, hStream): * """ Frees memory with stream ordered semantics. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaFreeAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phStream); __Pyx_XDECREF((PyObject *)__pyx_v_cydevPtr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23703 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolTrimTo(memPool, size_t minBytesToKeep): * """ Tries to release memory back to the OS. 
*/
/*
 * NOTE(review): Cython-generated wrapper + implementation for
 * cuda.bindings.runtime.cudaMemPoolTrimTo(memPool, size_t minBytesToKeep);
 * do not hand-edit -- regenerate from runtime.pyx.  The wrapper parses two
 * arguments (positional or keyword), converts minBytesToKeep to size_t via
 * __Pyx_PyLong_As_size_t, and dispatches to the __pyx_pf_ implementation.
 */
/* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_301cudaMemPoolTrimTo(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_300cudaMemPoolTrimTo, "cudaMemPoolTrimTo(memPool, size_t minBytesToKeep)\n\nTries to release memory back to the OS.\n\nReleases memory back to the OS until the pool contains fewer than\nminBytesToKeep reserved bytes, or there is no more memory that the\nallocator can safely release. The allocator cannot release OS\nallocations that back outstanding asynchronous allocations. The OS\nallocations may happen at different granularity from the user\nallocations.\n\nParameters\n----------\npool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n    The memory pool to trim\nminBytesToKeep : size_t\n    If the pool has less than minBytesToKeep reserved, the TrimTo\n    operation is a no-op. Otherwise the pool will be guaranteed to have\n    at least minBytesToKeep bytes reserved after the operation.\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolTrimTo`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceGetMemPool`, :py:obj:`~.cudaMemPoolCreate`\n\nNotes\n-----\n: Allocations that have not been freed count as outstanding.\n\n: Allocations that have been asynchronously freed but whose completion has not been observed on the host (eg.  by a synchronize) can count as outstanding."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_301cudaMemPoolTrimTo = {"cudaMemPoolTrimTo", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_301cudaMemPoolTrimTo, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_300cudaMemPoolTrimTo}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_301cudaMemPoolTrimTo(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_memPool = 0; size_t __pyx_v_minBytesToKeep; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolTrimTo (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_memPool,&__pyx_mstate_global->__pyx_n_u_minBytesToKeep,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23703, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23703, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23703, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolTrimTo", 0) < (0)) __PYX_ERR(0, 23703, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolTrimTo", 1, 2, 2, i); __PYX_ERR(0, 23703, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23703, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23703, __pyx_L3_error) } __pyx_v_memPool = values[0]; __pyx_v_minBytesToKeep = __Pyx_PyLong_As_size_t(values[1]); if (unlikely((__pyx_v_minBytesToKeep == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23704, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolTrimTo", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23703, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolTrimTo", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_300cudaMemPoolTrimTo(__pyx_self, __pyx_v_memPool, __pyx_v_minBytesToKeep); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/*
 * Implementation of cudaMemPoolTrimTo: coerces memPool to an int
 * (None -> 0; cudaMemPool_t/driver.CUmemoryPool -> int(memPool); otherwise
 * int(cudaMemPool_t(memPool))), converts it through an unsigned 64-bit value
 * to a cudaMemPool_t, calls cyruntime.cudaMemPoolTrimTo(cymemPool,
 * minBytesToKeep) with the GIL released, and returns the 1-tuple
 * (_dict_cudaError_t[err],).
 */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_300cudaMemPoolTrimTo(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_memPool, size_t __pyx_v_minBytesToKeep) { cudaMemPool_t __pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolTrimTo", 0); /* "cuda/bindings/runtime.pyx":23739 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_1 = (__pyx_v_memPool == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23740 * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23739 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23741 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23742 * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = int(cudaMemPool_t(memPool)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pmemPool = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":23741 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23744 * pmemPool = int(memPool) * else: * pmemPool = int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_memPool}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23744, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23744, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23745 * else: * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolTrimTo(cymemPool, minBytesToKeep) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23745, __pyx_L1_error) __pyx_v_cymemPool = ((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23746 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolTrimTo(cymemPool, minBytesToKeep) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23747 * cymemPool = pmemPool * with nogil: * err = cyruntime.cudaMemPoolTrimTo(cymemPool, minBytesToKeep) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolTrimTo(__pyx_v_cymemPool, __pyx_v_minBytesToKeep); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23747, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":23746 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolTrimTo(cymemPool, minBytesToKeep) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":23748 * with nogil: * err = cyruntime.cudaMemPoolTrimTo(cymemPool, minBytesToKeep) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 23748, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23703 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolTrimTo(memPool, size_t minBytesToKeep): * """ Tries to release memory back to the OS. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolTrimTo", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23750 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolSetAttribute(memPool, attr not None : cudaMemPoolAttr, value): * """ Sets attributes of a memory pool. 
*/
/*
 * NOTE(review): Cython-generated wrapper + (partially visible) implementation
 * for cuda.bindings.runtime.cudaMemPoolSetAttribute(memPool, attr, value);
 * do not hand-edit -- regenerate from runtime.pyx.  The wrapper parses three
 * arguments and, because 'attr' is declared "not None" in the .pyx signature,
 * raises TypeError if attr is None before dispatching.
 */
/* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_303cudaMemPoolSetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_302cudaMemPoolSetAttribute, "cudaMemPoolSetAttribute(memPool, attr: cudaMemPoolAttr, value)\n\nSets attributes of a memory pool.\n\nSupported attributes are:\n\n- :py:obj:`~.cudaMemPoolAttrReleaseThreshold`: (value type =\n  cuuint64_t) Amount of reserved memory in bytes to hold onto before\n  trying to release memory back to the OS. When more than the release\n  threshold bytes of memory are held by the memory pool, the allocator\n  will try to release memory back to the OS on the next call to stream,\n  event or context synchronize. (default 0)\n\n- :py:obj:`~.cudaMemPoolReuseFollowEventDependencies`: (value type =\n  int) Allow :py:obj:`~.cudaMallocAsync` to use memory asynchronously\n  freed in another stream as long as a stream ordering dependency of\n  the allocating stream on the free action exists. Cuda events and null\n  stream interactions can create the required stream ordered\n  dependencies. (default enabled)\n\n- :py:obj:`~.cudaMemPoolReuseAllowOpportunistic`: (value type = int)\n  Allow reuse of already completed frees when there is no dependency\n  between the free and allocation. (default enabled)\n\n- :py:obj:`~.cudaMemPoolReuseAllowInternalDependencies`: (value type =\n  int) Allow :py:obj:`~.cudaMallocAsync` to insert new stream\n  dependencies in order to establish the stream ordering required to\n  reuse a piece of memory released by :py:obj:`~.cudaFreeAsync`\n  (default enabled).\n\n- :py:obj:`~.cudaMemPoolAttrReservedMemHigh`: (value type = cuuint64_t)\n  Reset the high watermark that tracks the amount of backing memory\n  that was allocated for the memory pool. It is illegal to set this\n  attribute to a non-zero value.\n\n- :py:obj:`~.cudaMemPoolAttrUsedMemHigh`: (value type = cuuint64_t)\n  Reset the high watermark that tracks the amount of used memory that\n  was allocated for the memory pool. It is illegal to set this\n  attribute to a non-zero value.\n\nParameters\n----------\npool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n    The memory pool to modi""fy\nattr : :py:obj:`~.cudaMemPoolAttr`\n    The attribute to modify\nvalue : Any\n    Pointer to the value to assign\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolSetAttribute`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceGetMemPool`, :py:obj:`~.cudaMemPoolCreate`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_303cudaMemPoolSetAttribute = {"cudaMemPoolSetAttribute", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_303cudaMemPoolSetAttribute, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_302cudaMemPoolSetAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_303cudaMemPoolSetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_memPool = 0; PyObject *__pyx_v_attr = 0; PyObject *__pyx_v_value = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolSetAttribute (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_memPool,&__pyx_mstate_global->__pyx_n_u_attr,&__pyx_mstate_global->__pyx_n_u_value,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23750, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23750, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23750, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23750, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolSetAttribute", 0) < (0)) __PYX_ERR(0, 23750, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolSetAttribute", 1, 3, 3, i); __PYX_ERR(0, 23750, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23750, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23750, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23750, __pyx_L3_error) } __pyx_v_memPool = values[0]; __pyx_v_attr = values[1]; __pyx_v_value = values[2]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolSetAttribute", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 23750, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolSetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_attr) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attr"); __PYX_ERR(0, 23751, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_302cudaMemPoolSetAttribute(__pyx_self, __pyx_v_memPool, __pyx_v_attr, __pyx_v_value); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/*
 * Implementation of cudaMemPoolSetAttribute (visible portion): coerces
 * memPool (None/handle/int-like) to a cudaMemPool_t via an unsigned 64-bit
 * value, reads attr.value as a cudaMemPoolAttr enum, constructs
 * _HelperCUmemPool_attribute(attr, value, is_getter=False) and reads its
 * .cptr attribute as the void* value pointer.  The remainder of the body
 * (the nogil cyruntime.cudaMemPoolSetAttribute call and return) continues
 * past this chunk.
 */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_302cudaMemPoolSetAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_memPool, PyObject *__pyx_v_attr, PyObject *__pyx_v_value) { cudaMemPool_t __pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; enum cudaMemPoolAttr __pyx_v_cyattr; struct __pyx_obj_4cuda_8bindings_7runtime__HelperCUmemPool_attribute *__pyx_v_cyvalue = 0; void *__pyx_v_cyvalue_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaMemPoolAttr __pyx_t_8; PyObject *__pyx_t_9 = NULL; cudaError_t __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolSetAttribute", 0); /* "cuda/bindings/runtime.pyx":23809 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_1 = (__pyx_v_memPool == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23810 * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23809 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23811 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23812 * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = int(cudaMemPool_t(memPool)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23812, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pmemPool = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":23811 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23814 * pmemPool = int(memPool) * else: * pmemPool = int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_memPool}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23814, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23814, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23815 * else: * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, value, is_getter=False) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23815, __pyx_L1_error) __pyx_v_cymemPool = ((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23816 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value # <<<<<<<<<<<<<< * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, value, is_getter=False) * cdef void* cyvalue_ptr = cyvalue.cptr */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_attr, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23816, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaMemPoolAttr)__Pyx_PyLong_As_enum__cudaMemPoolAttr(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23816, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyattr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":23817 * cymemPool = pmemPool * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, value, is_getter=False) # <<<<<<<<<<<<<< * cdef void* cyvalue_ptr = cyvalue.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmemPool_attribute); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmemPool_attribute); __pyx_t_6 = 1; { PyObject *__pyx_callargs[3 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_3, __pyx_v_attr, __pyx_v_value}; __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23817, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_is_getter, Py_False, __pyx_t_9, __pyx_callargs+3, 0) < (0)) __PYX_ERR(0, 23817, __pyx_L1_error) __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_4, __pyx_callargs+__pyx_t_6, (3-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23817, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cyvalue = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperCUmemPool_attribute *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":23818 * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, value, is_getter=False) * cdef void* cyvalue_ptr = cyvalue.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolSetAttribute(cymemPool, cyattr, cyvalue_ptr) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyvalue), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23818, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23818, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyvalue_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23819 * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, value, is_getter=False) * cdef void* cyvalue_ptr = cyvalue.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolSetAttribute(cymemPool, cyattr, cyvalue_ptr) * return 
(_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23820 * cdef void* cyvalue_ptr = cyvalue.cptr * with nogil: * err = cyruntime.cudaMemPoolSetAttribute(cymemPool, cyattr, cyvalue_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_10 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolSetAttribute(__pyx_v_cymemPool, __pyx_v_cyattr, __pyx_v_cyvalue_ptr); if (unlikely(__pyx_t_10 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23820, __pyx_L7_error) __pyx_v_err = __pyx_t_10; } /* "cuda/bindings/runtime.pyx":23819 * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, value, is_getter=False) * cdef void* cyvalue_ptr = cyvalue.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolSetAttribute(cymemPool, cyattr, cyvalue_ptr) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":23821 * with nogil: * err = cyruntime.cudaMemPoolSetAttribute(cymemPool, cyattr, cyvalue_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(0, 23821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 23821, __pyx_L1_error); __pyx_t_9 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23750 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolSetAttribute(memPool, attr not None : cudaMemPoolAttr, value): * """ Sets attributes of a memory pool. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolSetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XDECREF((PyObject *)__pyx_v_cyvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23823 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolGetAttribute(memPool, attr not None : cudaMemPoolAttr): * """ Gets attributes of a memory pool. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_305cudaMemPoolGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_304cudaMemPoolGetAttribute, "cudaMemPoolGetAttribute(memPool, attr: cudaMemPoolAttr)\n\nGets attributes of a memory pool.\n\nSupported attributes are:\n\n- :py:obj:`~.cudaMemPoolAttrReleaseThreshold`: (value type =\n cuuint64_t) Amount of reserved memory in bytes to hold onto before\n trying to release memory back to the OS. 
When more than the release\n threshold bytes of memory are held by the memory pool, the allocator\n will try to release memory back to the OS on the next call to stream,\n event or context synchronize. (default 0)\n\n- :py:obj:`~.cudaMemPoolReuseFollowEventDependencies`: (value type =\n int) Allow :py:obj:`~.cudaMallocAsync` to use memory asynchronously\n freed in another stream as long as a stream ordering dependency of\n the allocating stream on the free action exists. Cuda events and null\n stream interactions can create the required stream ordered\n dependencies. (default enabled)\n\n- :py:obj:`~.cudaMemPoolReuseAllowOpportunistic`: (value type = int)\n Allow reuse of already completed frees when there is no dependency\n between the free and allocation. (default enabled)\n\n- :py:obj:`~.cudaMemPoolReuseAllowInternalDependencies`: (value type =\n int) Allow :py:obj:`~.cudaMallocAsync` to insert new stream\n dependencies in order to establish the stream ordering required to\n reuse a piece of memory released by :py:obj:`~.cudaFreeAsync`\n (default enabled).\n\n- :py:obj:`~.cudaMemPoolAttrReservedMemCurrent`: (value type =\n cuuint64_t) Amount of backing memory currently allocated for the\n mempool.\n\n- :py:obj:`~.cudaMemPoolAttrReservedMemHigh`: (value type = cuuint64_t)\n High watermark of backing memory allocated for the mempool since the\n last time it was reset.\n\n- :py:obj:`~.cudaMemPoolAttrUsedMemCurrent`: (value type = cuuint64_t)\n Amount of memory from the pool that is currently in use by the\n application.\n\n- :py:obj:`~.cudaMemPoolAttrUsedMemHigh`: (value type = cuuint64_t)\n High watermark of the amount of memory from the pool that wa""s in use\n by the application since the last time it was reset.\n\nParameters\n----------\npool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n The memory pool to get attributes of\nattr : :py:obj:`~.cudaMemPoolAttr`\n The attribute to get\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, 
:py:obj:`~.cudaErrorInvalidValue`\nvalue : Any\n Retrieved value\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolGetAttribute`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceGetMemPool`, :py:obj:`~.cudaMemPoolCreate`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_305cudaMemPoolGetAttribute = {"cudaMemPoolGetAttribute", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_305cudaMemPoolGetAttribute, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_304cudaMemPoolGetAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_305cudaMemPoolGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_memPool = 0; PyObject *__pyx_v_attr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolGetAttribute (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_memPool,&__pyx_mstate_global->__pyx_n_u_attr,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23823, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23823, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23823, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolGetAttribute", 0) < (0)) __PYX_ERR(0, 23823, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolGetAttribute", 1, 2, 2, i); __PYX_ERR(0, 23823, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23823, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23823, __pyx_L3_error) } __pyx_v_memPool = values[0]; __pyx_v_attr = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolGetAttribute", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23823, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_attr) == 
Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attr"); __PYX_ERR(0, 23824, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_304cudaMemPoolGetAttribute(__pyx_self, __pyx_v_memPool, __pyx_v_attr); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_304cudaMemPoolGetAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_memPool, PyObject *__pyx_v_attr) { cudaMemPool_t __pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; enum cudaMemPoolAttr __pyx_v_cyattr; struct __pyx_obj_4cuda_8bindings_7runtime__HelperCUmemPool_attribute *__pyx_v_cyvalue = 0; void *__pyx_v_cyvalue_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaMemPoolAttr __pyx_t_8; PyObject *__pyx_t_9 = NULL; cudaError_t __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolGetAttribute", 0); /* "cuda/bindings/runtime.pyx":23888 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_1 = (__pyx_v_memPool == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23889 * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif 
isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23888 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23890 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23891 * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = int(cudaMemPool_t(memPool)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pmemPool = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":23890 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23893 * pmemPool = int(memPool) * else: * pmemPool = int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_5 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_memPool}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23893, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23893, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23894 * else: * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, 0, is_getter=True) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23894, __pyx_L1_error) __pyx_v_cymemPool = ((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23895 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value # <<<<<<<<<<<<<< * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, 0, is_getter=True) * cdef void* cyvalue_ptr = cyvalue.cptr */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_attr, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23895, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaMemPoolAttr)__Pyx_PyLong_As_enum__cudaMemPoolAttr(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 23895, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyattr = 
__pyx_t_8; /* "cuda/bindings/runtime.pyx":23896 * cymemPool = pmemPool * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, 0, is_getter=True) # <<<<<<<<<<<<<< * cdef void* cyvalue_ptr = cyvalue.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmemPool_attribute); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmemPool_attribute); __pyx_t_6 = 1; { PyObject *__pyx_callargs[3 + ((CYTHON_VECTORCALL) ? 1 : 0)] = {__pyx_t_3, __pyx_v_attr, __pyx_mstate_global->__pyx_int_0}; __pyx_t_9 = __Pyx_MakeVectorcallBuilderKwds(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); if (__Pyx_VectorcallBuilder_AddArg(__pyx_mstate_global->__pyx_n_u_is_getter, Py_True, __pyx_t_9, __pyx_callargs+3, 0) < (0)) __PYX_ERR(0, 23896, __pyx_L1_error) __pyx_t_5 = __Pyx_Object_Vectorcall_CallFromBuilder(__pyx_t_4, __pyx_callargs+__pyx_t_6, (3-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET), __pyx_t_9); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23896, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cyvalue = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperCUmemPool_attribute *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":23897 * cdef cyruntime.cudaMemPoolAttr cyattr = attr.value * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, 0, is_getter=True) * cdef void* cyvalue_ptr = cyvalue.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolGetAttribute(cymemPool, cyattr, cyvalue_ptr) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyvalue), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23897, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23897, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyvalue_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23898 * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, 0, is_getter=True) * cdef void* cyvalue_ptr = cyvalue.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolGetAttribute(cymemPool, cyattr, cyvalue_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23899 * cdef void* cyvalue_ptr = cyvalue.cptr * with nogil: * err = cyruntime.cudaMemPoolGetAttribute(cymemPool, cyattr, cyvalue_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_10 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolGetAttribute(__pyx_v_cymemPool, __pyx_v_cyattr, __pyx_v_cyvalue_ptr); if (unlikely(__pyx_t_10 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23899, __pyx_L7_error) __pyx_v_err = __pyx_t_10; } /* "cuda/bindings/runtime.pyx":23898 * cdef _HelperCUmemPool_attribute cyvalue = _HelperCUmemPool_attribute(attr, 0, is_getter=True) * cdef void* cyvalue_ptr = cyvalue.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolGetAttribute(cymemPool, cyattr, cyvalue_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":23900 * with nogil: * err = cyruntime.cudaMemPoolGetAttribute(cymemPool, cyattr, cyvalue_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return 
(_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cyvalue.pyObj()) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23901 * err = cyruntime.cudaMemPoolGetAttribute(cymemPool, cyattr, cyvalue_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], cyvalue.pyObj()) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 23901, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 23901, __pyx_L1_error); __pyx_t_9 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23900 * with nogil: * err = cyruntime.cudaMemPoolGetAttribute(cymemPool, cyattr, cyvalue_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cyvalue.pyObj()) */ } /* "cuda/bindings/runtime.pyx":23902 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cyvalue.pyObj()) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, 
__pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23902, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23902, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23902, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_4 = ((PyObject *)__pyx_v_cyvalue); __Pyx_INCREF(__pyx_t_4); __pyx_t_6 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_4, NULL}; __pyx_t_9 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_pyObj, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23902, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); } __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23902, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 23902, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_9) != (0)) __PYX_ERR(0, 23902, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_9 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23823 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolGetAttribute(memPool, attr not None : cudaMemPoolAttr): * """ Gets attributes of a memory pool. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XDECREF((PyObject *)__pyx_v_cyvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23904 * return (_dict_cudaError_t[err], cyvalue.pyObj()) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolSetAccess(memPool, descList : Optional[tuple[cudaMemAccessDesc] | list[cudaMemAccessDesc]], size_t count): * """ Controls visibility of pools between devices. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_307cudaMemPoolSetAccess(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_306cudaMemPoolSetAccess, "cudaMemPoolSetAccess(memPool, descList: Optional[tuple[cudaMemAccessDesc] | list[cudaMemAccessDesc]], size_t count)\n\nControls visibility of pools between devices.\n\nParameters\n----------\npool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n The pool being modified\nmap : list[:py:obj:`~.cudaMemAccessDesc`]\n Array of access descriptors. 
Each descriptor instructs the access\n to enable for a single gpu\ncount : size_t\n Number of descriptors in the map array.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolSetAccess`, :py:obj:`~.cudaMemPoolGetAccess`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_307cudaMemPoolSetAccess = {"cudaMemPoolSetAccess", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_307cudaMemPoolSetAccess, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_306cudaMemPoolSetAccess}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_307cudaMemPoolSetAccess(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_memPool = 0; PyObject *__pyx_v_descList = 0; size_t __pyx_v_count; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolSetAccess (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_memPool,&__pyx_mstate_global->__pyx_n_u_descList,&__pyx_mstate_global->__pyx_n_u_count,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23904, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23904, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23904, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23904, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolSetAccess", 0) < (0)) __PYX_ERR(0, 23904, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolSetAccess", 1, 3, 3, i); __PYX_ERR(0, 23904, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23904, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23904, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 23904, __pyx_L3_error) } __pyx_v_memPool = values[0]; __pyx_v_descList = values[1]; __pyx_v_count = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 23905, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolSetAccess", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 23904, 
__pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolSetAccess", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_306cudaMemPoolSetAccess(__pyx_self, __pyx_v_memPool, __pyx_v_descList, __pyx_v_count); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemPoolSetAccess_2generator93(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":23928 * """ * descList = [] if descList is None else descList * if not all(isinstance(_x, (cudaMemAccessDesc,)) for _x in descList): # <<<<<<<<<<<<<< * raise TypeError("Argument 'descList' is not instance of type (expected tuple[cyruntime.cudaMemAccessDesc,] or list[cyruntime.cudaMemAccessDesc,]") * cdef cyruntime.cudaMemPool_t cymemPool */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_20cudaMemPoolSetAccess_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_93_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_93_genexpr 
/* [review] Cython-generated; continuation of the genexpr factory, then the generator
 * body implementing all(isinstance(_x, (cudaMemAccessDesc,)) for _x in descList):
 * fast path indexes list/tuple directly, otherwise falls back to a generic iterator;
 * returns Py_False on the first element that fails the type check, Py_True otherwise. */
*)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_93_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_93_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_93_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 23928, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_20cudaMemPoolSetAccess_2generator93, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[93]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaMemPoolSetAccess_locals_gene, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 23928, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolSetAccess.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemPoolSetAccess_2generator93(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_93_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_93_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject 
*__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 23928, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 23928, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23928, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23928, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 23928, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23928, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 23928, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemAccessDesc); __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if 
/* [review] Cython-generated; exit path of the genexpr generator body (StopIteration
 * translation, resume_label reset), followed by the start of the implementation of
 * cudaMemPoolSetAccess: descList None -> [], genexpr-based element type check with
 * TypeError on failure, then normalization of `memPool` (None -> 0,
 * cudaMemPool_t/CUmemoryPool -> int(memPool), otherwise int(cudaMemPool_t(memPool)))
 * and conversion to the C handle `cymemPool`. */
(__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23904 * return (_dict_cudaError_t[err], cyvalue.pyObj()) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolSetAccess(memPool, descList : Optional[tuple[cudaMemAccessDesc] | list[cudaMemAccessDesc]], size_t count): * """ Controls visibility of pools between devices. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_306cudaMemPoolSetAccess(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_memPool, PyObject *__pyx_v_descList, size_t __pyx_v_count) { cudaMemPool_t __pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; struct cudaMemAccessDesc *__pyx_v_cydescList; Py_ssize_t __pyx_v_idx; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaMemPoolSetAccess_2generator93 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaMemAccessDesc *__pyx_t_14; cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolSetAccess", 0); __Pyx_INCREF(__pyx_v_descList); /* "cuda/bindings/runtime.pyx":23927 * :py:obj:`~.cuMemPoolSetAccess`, :py:obj:`~.cudaMemPoolGetAccess`, :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync` * """ * descList = [] if descList is None else 
descList # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaMemAccessDesc,)) for _x in descList): * raise TypeError("Argument 'descList' is not instance of type (expected tuple[cyruntime.cudaMemAccessDesc,] or list[cyruntime.cudaMemAccessDesc,]") */ __pyx_t_2 = (__pyx_v_descList == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23927, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_descList); __pyx_t_1 = __pyx_v_descList; } __Pyx_DECREF_SET(__pyx_v_descList, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":23928 * """ * descList = [] if descList is None else descList * if not all(isinstance(_x, (cudaMemAccessDesc,)) for _x in descList): # <<<<<<<<<<<<<< * raise TypeError("Argument 'descList' is not instance of type (expected tuple[cyruntime.cudaMemAccessDesc,] or list[cyruntime.cudaMemAccessDesc,]") * cdef cyruntime.cudaMemPool_t cymemPool */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_20cudaMemPoolSetAccess_genexpr(NULL, __pyx_v_descList); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 23928, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":23929 * descList = [] if descList is None else descList * if not all(isinstance(_x, (cudaMemAccessDesc,)) for _x in descList): * raise TypeError("Argument 'descList' is not instance of type (expected tuple[cyruntime.cudaMemAccessDesc,] or list[cyruntime.cudaMemAccessDesc,]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); 
__pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_descList_is_not_instanc}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 23929, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":23928 * """ * descList = [] if descList is None else descList * if not all(isinstance(_x, (cudaMemAccessDesc,)) for _x in descList): # <<<<<<<<<<<<<< * raise TypeError("Argument 'descList' is not instance of type (expected tuple[cyruntime.cudaMemAccessDesc,] or list[cyruntime.cudaMemAccessDesc,]") * cdef cyruntime.cudaMemPool_t cymemPool */ } /* "cuda/bindings/runtime.pyx":23931 * raise TypeError("Argument 'descList' is not instance of type (expected tuple[cyruntime.cudaMemAccessDesc,] or list[cyruntime.cudaMemAccessDesc,]") * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_4 = (__pyx_v_memPool == Py_None); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":23932 * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23931 * raise TypeError("Argument 'descList' is not instance of type (expected tuple[cyruntime.cudaMemAccessDesc,] or list[cyruntime.cudaMemAccessDesc,]") * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif 
isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":23933 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_4 = __pyx_t_2; __pyx_L5_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":23934 * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = int(cudaMemPool_t(memPool)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23934, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pmemPool = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":23933 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":23936 * pmemPool = int(memPool) * else: * pmemPool = int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * cdef cyruntime.cudaMemAccessDesc* cydescList = NULL */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_memPool}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23936, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_1 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23936, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":23937 * else: * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemAccessDesc* cydescList = NULL * if len(descList) > 1: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23937, __pyx_L1_error) __pyx_v_cymemPool = ((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23938 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * cdef cyruntime.cudaMemAccessDesc* cydescList = NULL # <<<<<<<<<<<<<< * if len(descList) > 1: * cydescList = calloc(len(descList), sizeof(cyruntime.cudaMemAccessDesc)) */ __pyx_v_cydescList = NULL; /* "cuda/bindings/runtime.pyx":23939 * cymemPool = pmemPool * cdef cyruntime.cudaMemAccessDesc* cydescList = NULL * if len(descList) > 1: # <<<<<<<<<<<<<< * cydescList = calloc(len(descList), sizeof(cyruntime.cudaMemAccessDesc)) * if cydescList is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_descList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23939, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":23940 * cdef cyruntime.cudaMemAccessDesc* cydescList = NULL * if len(descList) > 1: * cydescList = calloc(len(descList), sizeof(cyruntime.cudaMemAccessDesc)) # <<<<<<<<<<<<<< * if cydescList is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(descList)) + 'x' + str(sizeof(cyruntime.cudaMemAccessDesc))) */ __pyx_t_8 = 
/* [review] Cython-generated; middle of cudaMemPoolSetAccess: for len(descList) > 1 a
 * calloc'd C array is filled by memcpy from each element's _pvt_ptr (MemoryError with
 * a "length x size" message on allocation failure); for len == 1 the single element's
 * _pvt_ptr is borrowed directly (no allocation); then the count > len(descList)
 * RuntimeError message is assembled by unicode concatenation. */
PyObject_Length(__pyx_v_descList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23940, __pyx_L1_error) __pyx_v_cydescList = ((struct cudaMemAccessDesc *)calloc(__pyx_t_8, (sizeof(struct cudaMemAccessDesc)))); /* "cuda/bindings/runtime.pyx":23941 * if len(descList) > 1: * cydescList = calloc(len(descList), sizeof(cyruntime.cudaMemAccessDesc)) * if cydescList is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(descList)) + 'x' + str(sizeof(cyruntime.cudaMemAccessDesc))) * for idx in range(len(descList)): */ __pyx_t_4 = (__pyx_v_cydescList == NULL); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":23942 * cydescList = calloc(len(descList), sizeof(cyruntime.cudaMemAccessDesc)) * if cydescList is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(descList)) + 'x' + str(sizeof(cyruntime.cudaMemAccessDesc))) # <<<<<<<<<<<<<< * for idx in range(len(descList)): * string.memcpy(&cydescList[idx], (descList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemAccessDesc)) */ __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_5 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_descList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23942, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23942, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(struct cudaMemAccessDesc))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 23942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_9}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23942, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 23942, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":23941 * if len(descList) > 1: * cydescList = calloc(len(descList), sizeof(cyruntime.cudaMemAccessDesc)) * if cydescList is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(descList)) + 'x' + str(sizeof(cyruntime.cudaMemAccessDesc))) * for idx in range(len(descList)): */ } /* "cuda/bindings/runtime.pyx":23943 * if cydescList is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(descList)) + 'x' + str(sizeof(cyruntime.cudaMemAccessDesc))) * for idx in range(len(descList)): # <<<<<<<<<<<<<< * string.memcpy(&cydescList[idx], (descList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemAccessDesc)) * elif len(descList) == 1: */ __pyx_t_8 = PyObject_Length(__pyx_v_descList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) 
__PYX_ERR(0, 23943, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":23944 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(descList)) + 'x' + str(sizeof(cyruntime.cudaMemAccessDesc))) * for idx in range(len(descList)): * string.memcpy(&cydescList[idx], (descList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemAccessDesc)) # <<<<<<<<<<<<<< * elif len(descList) == 1: * cydescList = (descList[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_descList, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23944, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (void)(memcpy((&(__pyx_v_cydescList[__pyx_v_idx])), ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemAccessDesc *)__pyx_t_1)->_pvt_ptr, (sizeof(struct cudaMemAccessDesc)))); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } /* "cuda/bindings/runtime.pyx":23939 * cymemPool = pmemPool * cdef cyruntime.cudaMemAccessDesc* cydescList = NULL * if len(descList) > 1: # <<<<<<<<<<<<<< * cydescList = calloc(len(descList), sizeof(cyruntime.cudaMemAccessDesc)) * if cydescList is NULL: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":23945 * for idx in range(len(descList)): * string.memcpy(&cydescList[idx], (descList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemAccessDesc)) * elif len(descList) == 1: # <<<<<<<<<<<<<< * cydescList = (descList[0])._pvt_ptr * if count > len(descList): raise RuntimeError("List is too small: " + str(len(descList)) + " < " + str(count)) */ __pyx_t_8 = PyObject_Length(__pyx_v_descList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23945, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 == 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":23946 * string.memcpy(&cydescList[idx], (descList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemAccessDesc)) * elif len(descList) == 1: * cydescList = (descList[0])._pvt_ptr # <<<<<<<<<<<<<< * if count > 
len(descList): raise RuntimeError("List is too small: " + str(len(descList)) + " < " + str(count)) * with nogil: */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_descList, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23946, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_14 = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemAccessDesc *)__pyx_t_1)->_pvt_ptr; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydescList = __pyx_t_14; /* "cuda/bindings/runtime.pyx":23945 * for idx in range(len(descList)): * string.memcpy(&cydescList[idx], (descList[idx])._pvt_ptr, sizeof(cyruntime.cudaMemAccessDesc)) * elif len(descList) == 1: # <<<<<<<<<<<<<< * cydescList = (descList[0])._pvt_ptr * if count > len(descList): raise RuntimeError("List is too small: " + str(len(descList)) + " < " + str(count)) */ } __pyx_L7:; /* "cuda/bindings/runtime.pyx":23947 * elif len(descList) == 1: * cydescList = (descList[0])._pvt_ptr * if count > len(descList): raise RuntimeError("List is too small: " + str(len(descList)) + " < " + str(count)) # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolSetAccess(cymemPool, cydescList, count) */ __pyx_t_8 = PyObject_Length(__pyx_v_descList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23947, __pyx_L1_error) __pyx_t_4 = (__pyx_v_count > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_4)) { __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_9 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_descList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23947, __pyx_L1_error) __pyx_t_3 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 23947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, 
/* [review] Cython-generated; tail of cudaMemPoolSetAccess: finishes building/raising
 * the "List is too small" RuntimeError, releases the GIL for the actual
 * cyruntime.cudaMemPoolSetAccess call, frees the calloc'd descriptor array only on
 * the len > 1 path (the len == 1 path borrowed the element's own buffer), and returns
 * a 1-tuple (_dict_cudaError_t[err],). */
__pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_3, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 23947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_FromSize_t(__pyx_v_count); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23947, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 23947, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":23948 * cydescList = (descList[0])._pvt_ptr * if count > len(descList): raise RuntimeError("List is too small: " + str(len(descList)) + " < " + str(count)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolSetAccess(cymemPool, cydescList, count) * if len(descList) > 1 and cydescList is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23949 * if count > len(descList): raise RuntimeError("List is too small: " + 
str(len(descList)) + " < " + str(count)) * with nogil: * err = cyruntime.cudaMemPoolSetAccess(cymemPool, cydescList, count) # <<<<<<<<<<<<<< * if len(descList) > 1 and cydescList is not NULL: * free(cydescList) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolSetAccess(__pyx_v_cymemPool, __pyx_v_cydescList, __pyx_v_count); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23949, __pyx_L13_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":23948 * cydescList = (descList[0])._pvt_ptr * if count > len(descList): raise RuntimeError("List is too small: " + str(len(descList)) + " < " + str(count)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolSetAccess(cymemPool, cydescList, count) * if len(descList) > 1 and cydescList is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L14; } __pyx_L13_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L14:; } } /* "cuda/bindings/runtime.pyx":23950 * with nogil: * err = cyruntime.cudaMemPoolSetAccess(cymemPool, cydescList, count) * if len(descList) > 1 and cydescList is not NULL: # <<<<<<<<<<<<<< * free(cydescList) * return (_dict_cudaError_t[err],) */ __pyx_t_8 = PyObject_Length(__pyx_v_descList); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 23950, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L16_bool_binop_done; } __pyx_t_2 = (__pyx_v_cydescList != NULL); __pyx_t_4 = __pyx_t_2; __pyx_L16_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":23951 * err = cyruntime.cudaMemPoolSetAccess(cymemPool, cydescList, count) * if len(descList) > 1 and cydescList is not NULL: * free(cydescList) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ free(__pyx_v_cydescList); /* "cuda/bindings/runtime.pyx":23950 * with nogil: * err = cyruntime.cudaMemPoolSetAccess(cymemPool, 
cydescList, count) * if len(descList) > 1 and cydescList is not NULL: # <<<<<<<<<<<<<< * free(cydescList) * return (_dict_cudaError_t[err],) */ } /* "cuda/bindings/runtime.pyx":23952 * if len(descList) > 1 and cydescList is not NULL: * free(cydescList) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 23952, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23952, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23952, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 23952, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 23952, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23904 * return (_dict_cudaError_t[err], cyvalue.pyObj()) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolSetAccess(memPool, descList : Optional[tuple[cudaMemAccessDesc] | list[cudaMemAccessDesc]], size_t count): * """ Controls visibility of pools between devices. 
*/
/* [review] Cython-generated; exit/cleanup path of cudaMemPoolSetAccess, then the
 * METH_FASTCALL wrapper for cudaMemPoolGetAccess(memPool, location): docstring,
 * PyMethodDef, keyword/positional unpacking of two arguments, ArgTypeTest that
 * `location` is a cudaMemLocation (or None), and dispatch to the implementation.
 * The implementation function (..._308cudaMemPoolGetAccess) begins at the end of
 * this span and continues beyond it. */
 /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolSetAccess", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_20cudaMemPoolSetAccess_2generator93); __Pyx_XDECREF(__pyx_v_descList); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23954 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolGetAccess(memPool, location : Optional[cudaMemLocation]): * """ Returns the accessibility of a pool from a device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_309cudaMemPoolGetAccess(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_308cudaMemPoolGetAccess, "cudaMemPoolGetAccess(memPool, cudaMemLocation location: Optional[cudaMemLocation])\n\nReturns the accessibility of a pool from a device.\n\nReturns the accessibility of the pool's memory from the specified\nlocation.\n\nParameters\n----------\nmemPool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n the pool being queried\nlocation : :py:obj:`~.cudaMemLocation`\n the location accessing the pool\n\nReturns\n-------\ncudaError_t\n\nflags : :py:obj:`~.cudaMemAccessFlags`\n the accessibility of the pool from the specified location\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolGetAccess`, :py:obj:`~.cudaMemPoolSetAccess`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_309cudaMemPoolGetAccess = {"cudaMemPoolGetAccess", 
(PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_309cudaMemPoolGetAccess, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_308cudaMemPoolGetAccess}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_309cudaMemPoolGetAccess(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_memPool = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemLocation *__pyx_v_location = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolGetAccess (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_memPool,&__pyx_mstate_global->__pyx_n_u_location_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23954, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23954, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23954, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolGetAccess", 0) < (0)) __PYX_ERR(0, 23954, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolGetAccess", 1, 2, 2, i); __PYX_ERR(0, 23954, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23954, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 23954, __pyx_L3_error) } __pyx_v_memPool = values[0]; __pyx_v_location = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemLocation *)values[1]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolGetAccess", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 23954, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolGetAccess", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; 
__pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_location), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemLocation, 1, "location", 0))) __PYX_ERR(0, 23955, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_308cudaMemPoolGetAccess(__pyx_self, __pyx_v_memPool, __pyx_v_location); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_308cudaMemPoolGetAccess(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_memPool, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemLocation *__pyx_v_location) { cudaMemPool_t __pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; enum cudaMemAccessFlags __pyx_v_flags; struct cudaMemLocation *__pyx_v_cylocation_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; struct cudaMemLocation *__pyx_t_8; cudaError_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolGetAccess", 0); /* "cuda/bindings/runtime.pyx":23980 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_1 = (__pyx_v_memPool == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23981 * 
cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":23980 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23982 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23983 * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = int(cudaMemPool_t(memPool)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pmemPool = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":23982 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":23985 * pmemPool = int(memPool) * else: * pmemPool = int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * cdef cyruntime.cudaMemAccessFlags flags */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_memPool}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23985, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23985, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":23986 * else: * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemAccessFlags flags * cdef cyruntime.cudaMemLocation* cylocation_ptr = location._pvt_ptr if location is not None else NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 23986, __pyx_L1_error) __pyx_v_cymemPool = ((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":23988 * cymemPool = pmemPool * cdef cyruntime.cudaMemAccessFlags flags * cdef cyruntime.cudaMemLocation* cylocation_ptr = location._pvt_ptr if location is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolGetAccess(&flags, cymemPool, cylocation_ptr) */ __pyx_t_1 = (((PyObject *)__pyx_v_location) != Py_None); if (__pyx_t_1) { __pyx_t_8 = __pyx_v_location->_pvt_ptr; } else { __pyx_t_8 = NULL; } __pyx_v_cylocation_ptr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":23989 * cdef cyruntime.cudaMemAccessFlags flags * cdef cyruntime.cudaMemLocation* 
cylocation_ptr = location._pvt_ptr if location is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolGetAccess(&flags, cymemPool, cylocation_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":23990 * cdef cyruntime.cudaMemLocation* cylocation_ptr = location._pvt_ptr if location is not None else NULL * with nogil: * err = cyruntime.cudaMemPoolGetAccess(&flags, cymemPool, cylocation_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolGetAccess((&__pyx_v_flags), __pyx_v_cymemPool, __pyx_v_cylocation_ptr); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23990, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":23989 * cdef cyruntime.cudaMemAccessFlags flags * cdef cyruntime.cudaMemLocation* cylocation_ptr = location._pvt_ptr if location is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolGetAccess(&flags, cymemPool, cylocation_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":23991 * with nogil: * err = cyruntime.cudaMemPoolGetAccess(&flags, cymemPool, cylocation_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaMemAccessFlags(flags)) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":23992 * err = cyruntime.cudaMemPoolGetAccess(&flags, cymemPool, cylocation_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return 
(_dict_cudaError_t[err], cudaMemAccessFlags(flags)) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23992, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 23992, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 23992, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23991 * with nogil: * err = cyruntime.cudaMemPoolGetAccess(&flags, cymemPool, cylocation_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaMemAccessFlags(flags)) */ } /* "cuda/bindings/runtime.pyx":23993 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cudaMemAccessFlags(flags)) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 23993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = NULL; __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_mstate_global->__pyx_n_u_cudaMemAccessFlags); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __pyx_t_11 = __Pyx_PyLong_From_enum__cudaMemAccessFlags(__pyx_v_flags); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 23993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __pyx_t_6 = 1; #if CYTHON_UNPACK_METHODS if (unlikely(PyMethod_Check(__pyx_t_10))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_10); assert(__pyx_t_3); PyObject* __pyx__function = PyMethod_GET_FUNCTION(__pyx_t_10); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx__function); __Pyx_DECREF_SET(__pyx_t_10, __pyx__function); __pyx_t_6 = 0; } #endif { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_11}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_10, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 23993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); } __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 23993, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 23993, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 23993, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_10; __pyx_t_10 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23954 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolGetAccess(memPool, location : Optional[cudaMemLocation]): * """ Returns the accessibility of a pool from a device. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolGetAccess", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":23995 * return (_dict_cudaError_t[err], cudaMemAccessFlags(flags)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolCreate(poolProps : Optional[cudaMemPoolProps]): * """ Creates a memory pool. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_311cudaMemPoolCreate(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_310cudaMemPoolCreate, "cudaMemPoolCreate(cudaMemPoolProps poolProps: Optional[cudaMemPoolProps])\n\nCreates a memory pool.\n\nCreates a CUDA memory pool and returns the handle in `pool`. The\n`poolProps` determines the properties of the pool such as the backing\ndevice and IPC capabilities.\n\nTo create a memory pool targeting a specific host NUMA node,\napplications must set\n:py:obj:`~.cudaMemPoolProps`::cudaMemLocation::type to\n:py:obj:`~.cudaMemLocationTypeHostNuma` and\n:py:obj:`~.cudaMemPoolProps`::cudaMemLocation::id must specify the NUMA\nID of the host memory node. Specifying\n:py:obj:`~.cudaMemLocationTypeHostNumaCurrent` or\n:py:obj:`~.cudaMemLocationTypeHost` as the\n:py:obj:`~.cudaMemPoolProps`::cudaMemLocation::type will result in\n:py:obj:`~.cudaErrorInvalidValue`. By default, the pool's memory will\nbe accessible from the device it is allocated on. 
In the case of pools\ncreated with :py:obj:`~.cudaMemLocationTypeHostNuma`, their default\naccessibility will be from the host CPU. Applications can control the\nmaximum size of the pool by specifying a non-zero value for\n:py:obj:`~.cudaMemPoolProps.maxSize`. If set to 0, the maximum size of\nthe pool will default to a system dependent value.\n\nApplications that intend to use :py:obj:`~.CU_MEM_HANDLE_TYPE_FABRIC`\nbased memory sharing must ensure: (1) `nvidia-caps-imex-channels`\ncharacter device is created by the driver and is listed under\n/proc/devices (2) have at least one IMEX channel file accessible by the\nuser launching the application.\n\nWhen exporter and importer CUDA processes have been granted access to\nthe same IMEX channel, they can securely share memory.\n\nThe IMEX channel security model works on a per user basis. Which means\nall processes under a user can share memory if the user has access to a\nvalid IMEX channel. When multi-user isolation is desired, a separate\nIMEX channel is required for each user.\n\nThese channel files exist in /dev/nvidia-caps-imex-channels/channel*\nand can be created using stand""ard OS native calls like mknod on Linux.\nFor example: To create channel0 with the major number from\n/proc/devices users can execute the following command: `mknod\n/dev/nvidia-caps-imex-channels/channel0 c 0`\n\nParameters\n----------\npoolProps : :py:obj:`~.cudaMemPoolProps`\n None\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorNotSupported`\nmemPool : :py:obj:`~.cudaMemPool_t`\n None\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolCreate`, :py:obj:`~.cudaDeviceSetMemPool`, :py:obj:`~.cudaMallocFromPoolAsync`, :py:obj:`~.cudaMemPoolExportToShareableHandle`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceGetMemPool`\n\nNotes\n-----\nSpecifying cudaMemHandleTypeNone creates a memory pool that will not support IPC."); static PyMethodDef 
__pyx_mdef_4cuda_8bindings_7runtime_311cudaMemPoolCreate = {"cudaMemPoolCreate", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_311cudaMemPoolCreate, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_310cudaMemPoolCreate}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_311cudaMemPoolCreate(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPoolProps *__pyx_v_poolProps = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolCreate (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_poolProps_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 23995, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23995, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolCreate", 0) < (0)) __PYX_ERR(0, 23995, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolCreate", 1, 1, 1, i); __PYX_ERR(0, 23995, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 23995, __pyx_L3_error) } __pyx_v_poolProps = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPoolProps *)values[0]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolCreate", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 23995, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolCreate", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_poolProps), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPoolProps, 1, "poolProps", 0))) __PYX_ERR(0, 23996, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_310cudaMemPoolCreate(__pyx_self, __pyx_v_poolProps); /* function exit code */ goto __pyx_L0; 
__pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_310cudaMemPoolCreate(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPoolProps *__pyx_v_poolProps) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPool_t *__pyx_v_memPool = 0; struct cudaMemPoolProps *__pyx_v_cypoolProps_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; struct cudaMemPoolProps *__pyx_t_5; int __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolCreate", 0); /* "cuda/bindings/runtime.pyx":24060 * Specifying cudaMemHandleTypeNone creates a memory pool that will not support IPC. 
* """ * cdef cudaMemPool_t memPool = cudaMemPool_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemPoolProps* cypoolProps_ptr = poolProps._pvt_ptr if poolProps is not None else NULL * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24060, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_memPool = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPool_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":24061 * """ * cdef cudaMemPool_t memPool = cudaMemPool_t() * cdef cyruntime.cudaMemPoolProps* cypoolProps_ptr = poolProps._pvt_ptr if poolProps is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolCreate(memPool._pvt_ptr, cypoolProps_ptr) */ __pyx_t_6 = (((PyObject *)__pyx_v_poolProps) != Py_None); if (__pyx_t_6) { __pyx_t_5 = __pyx_v_poolProps->_pvt_ptr; } else { __pyx_t_5 = NULL; } __pyx_v_cypoolProps_ptr = __pyx_t_5; /* "cuda/bindings/runtime.pyx":24062 * cdef cudaMemPool_t memPool = cudaMemPool_t() * cdef cyruntime.cudaMemPoolProps* cypoolProps_ptr = poolProps._pvt_ptr if poolProps is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolCreate(memPool._pvt_ptr, cypoolProps_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24063 * cdef cyruntime.cudaMemPoolProps* cypoolProps_ptr = poolProps._pvt_ptr if poolProps is not None else NULL * with nogil: * err = 
cyruntime.cudaMemPoolCreate(memPool._pvt_ptr, cypoolProps_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolCreate(((cudaMemPool_t *)__pyx_v_memPool->__pyx_base._pvt_ptr), __pyx_v_cypoolProps_ptr); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24063, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":24062 * cdef cudaMemPool_t memPool = cudaMemPool_t() * cdef cyruntime.cudaMemPoolProps* cypoolProps_ptr = poolProps._pvt_ptr if poolProps is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolCreate(memPool._pvt_ptr, cypoolProps_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":24064 * with nogil: * err = cyruntime.cudaMemPoolCreate(memPool._pvt_ptr, cypoolProps_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":24065 * err = cyruntime.cudaMemPoolCreate(memPool._pvt_ptr, cypoolProps_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], memPool) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24065, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24065, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(0, 24065, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24065, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 24065, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 24065, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24064 * with nogil: * err = cyruntime.cudaMemPoolCreate(memPool._pvt_ptr, cypoolProps_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) */ } /* "cuda/bindings/runtime.pyx":24066 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 24066, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_memPool); __Pyx_GIVEREF((PyObject *)__pyx_v_memPool); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, 
((PyObject *)__pyx_v_memPool)) != (0)) __PYX_ERR(0, 24066, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":23995 * return (_dict_cudaError_t[err], cudaMemAccessFlags(flags)) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolCreate(poolProps : Optional[cudaMemPoolProps]): * """ Creates a memory pool. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolCreate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memPool); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24068 * return (_dict_cudaError_t[err], memPool) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolDestroy(memPool): * """ Destroys the specified memory pool. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_313cudaMemPoolDestroy(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_312cudaMemPoolDestroy, "cudaMemPoolDestroy(memPool)\n\nDestroys the specified memory pool.\n\nIf any pointers obtained from this pool haven't been freed or the pool\nhas free operations that haven't completed when\n:py:obj:`~.cudaMemPoolDestroy` is invoked, the function will return\nimmediately and the resources associated with the pool will be released\nautomatically once there are no more outstanding allocations.\n\nDestroying the current mempool of a device sets the default mempool of\nthat device as the current mempool for that device.\n\nParameters\n----------\nmemPool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n None\n\nReturns\n-------\ncudaError_t\n 
:py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\ncuMemPoolDestroy, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaDeviceSetMemPool`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaDeviceGetMemPool`, :py:obj:`~.cudaMemPoolCreate`\n\nNotes\n-----\nA device's default memory pool cannot be destroyed."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_313cudaMemPoolDestroy = {"cudaMemPoolDestroy", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_313cudaMemPoolDestroy, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_312cudaMemPoolDestroy}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_313cudaMemPoolDestroy(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_memPool = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolDestroy (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_memPool,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24068, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24068, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolDestroy", 0) < (0)) __PYX_ERR(0, 24068, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolDestroy", 1, 1, 1, i); __PYX_ERR(0, 24068, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24068, __pyx_L3_error) } __pyx_v_memPool = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolDestroy", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24068, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolDestroy", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_312cudaMemPoolDestroy(__pyx_self, __pyx_v_memPool); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_312cudaMemPoolDestroy(CYTHON_UNUSED 
/* NOTE(review): Cython-generated implementation body for cudaMemPoolDestroy
 * (from cuda/bindings/runtime.pyx around line 24100). Normalizes `memPool`
 * (None -> 0; cudaMemPool_t / driver.CUmemoryPool -> int(memPool); anything
 * else -> int(cudaMemPool_t(memPool))), converts the resulting Python int to
 * a cudaMemPool_t handle, releases the GIL around the
 * cyruntime.cudaMemPoolDestroy call, and returns the 1-tuple
 * (_dict_cudaError_t[err],). Do not hand-edit: regenerate from the .pyx. */
PyObject *__pyx_self, PyObject *__pyx_v_memPool) { cudaMemPool_t __pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolDestroy", 0); /* "cuda/bindings/runtime.pyx":24100 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_1 = (__pyx_v_memPool == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24101 * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24100 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24102 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24103 * pmemPool = 0 * elif 
isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = int(cudaMemPool_t(memPool)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24103, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pmemPool = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":24102 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24105 * pmemPool = int(memPool) * else: * pmemPool = int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * with nogil: */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_memPool}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24105, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24105, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24106 * else: * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolDestroy(cymemPool) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24106, __pyx_L1_error) 
__pyx_v_cymemPool = ((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24107 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolDestroy(cymemPool) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24108 * cymemPool = pmemPool * with nogil: * err = cyruntime.cudaMemPoolDestroy(cymemPool) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolDestroy(__pyx_v_cymemPool); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24108, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":24107 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolDestroy(cymemPool) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":24109 * with nogil: * err = cyruntime.cudaMemPoolDestroy(cymemPool) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 
0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24109, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 24109, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24068 * return (_dict_cudaError_t[err], memPool) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolDestroy(memPool): * """ Destroys the specified memory pool. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolDestroy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24111 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocFromPoolAsync(size_t size, memPool, stream): * """ Allocates memory from a specified pool with stream ordered semantics. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_315cudaMallocFromPoolAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_314cudaMallocFromPoolAsync, "cudaMallocFromPoolAsync(size_t size, memPool, stream)\n\nAllocates memory from a specified pool with stream ordered semantics.\n\nInserts an allocation operation into `hStream`. A pointer to the\nallocated memory is returned immediately in *dptr. The allocation must\nnot be accessed until the the allocation operation completes. 
The\nallocation comes from the specified memory pool.\n\nParameters\n----------\nbytesize : size_t\n Number of bytes to allocate\nmemPool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n The pool to allocate from\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n The stream establishing the stream ordering semantic\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorNotSupported`, :py:obj:`~.cudaErrorOutOfMemory`\nptr : Any\n Returned device pointer\n\nSee Also\n--------\n:py:obj:`~.cuMemAllocFromPoolAsync`, cudaMallocAsync (C++ API), :py:obj:`~.cudaMallocAsync`, :py:obj:`~.cudaFreeAsync`, :py:obj:`~.cudaDeviceGetDefaultMemPool`, :py:obj:`~.cudaMemPoolCreate`, :py:obj:`~.cudaMemPoolSetAccess`, :py:obj:`~.cudaMemPoolSetAttribute`\n\nNotes\n-----\nDuring stream capture, this function results in the creation of an allocation node. In this case, the allocation is owned by the graph instead of the memory pool. The memory pool's properties are used to set the node's creation parameters."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_315cudaMallocFromPoolAsync = {"cudaMallocFromPoolAsync", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_315cudaMallocFromPoolAsync, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_314cudaMallocFromPoolAsync}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_315cudaMallocFromPoolAsync(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { size_t __pyx_v_size; PyObject *__pyx_v_memPool = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
/* NOTE(review): Cython-generated argument unpacking for the
 * cudaMallocFromPoolAsync wrapper. Accepts exactly 3 arguments
 * (size, memPool, stream) positionally and/or by keyword; raises
 * TypeError via __Pyx_RaiseArgtupleInvalid on a count mismatch or a
 * missing argument. Generated code — do not hand-edit. */
PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMallocFromPoolAsync (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_size_2,&__pyx_mstate_global->__pyx_n_u_memPool,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24111, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24111, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24111, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24111, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMallocFromPoolAsync", 0) < (0)) __PYX_ERR(0, 24111, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMallocFromPoolAsync", 1, 3, 3, i); __PYX_ERR(0, 24111, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24111, __pyx_L3_error) values[1] = 
__Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24111, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24111, __pyx_L3_error) } __pyx_v_size = __Pyx_PyLong_As_size_t(values[0]); if (unlikely((__pyx_v_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 24112, __pyx_L3_error) __pyx_v_memPool = values[1]; __pyx_v_stream = values[2]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMallocFromPoolAsync", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24111, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocFromPoolAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_314cudaMallocFromPoolAsync(__pyx_self, __pyx_v_size, __pyx_v_memPool, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_314cudaMallocFromPoolAsync(CYTHON_UNUSED PyObject *__pyx_self, size_t __pyx_v_size, PyObject *__pyx_v_memPool, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaMemPool_t __pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; 
/* NOTE(review): Cython-generated implementation body for
 * cudaMallocFromPoolAsync (from cuda/bindings/runtime.pyx around line 24145).
 * Normalizes `stream` and `memPool` the same way (None -> 0; already-wrapped
 * handle -> int(obj); otherwise wrap in cudaStream_t / cudaMemPool_t and take
 * int), converts both Python ints to C handles, zero-initializes the output
 * pointer, releases the GIL around cyruntime.cudaMallocFromPoolAsync, and
 * returns (_dict_cudaError_t[err], None) on failure or
 * (_dict_cudaError_t[err], ptr) on success. Generated code — do not hand-edit. */
__pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMallocFromPoolAsync", 0); /* "cuda/bindings/runtime.pyx":24145 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24146 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24145 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24147 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24148 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":24147 * if stream 
is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24150 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaMemPool_t cymemPool */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24150, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24150, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24151 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24151, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24153 * cystream = pstream * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_1 = (__pyx_v_memPool == Py_None); if (__pyx_t_1) { /* 
"cuda/bindings/runtime.pyx":24154 * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24153 * cystream = pstream * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":24155 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24156 * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = int(cudaMemPool_t(memPool)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24156, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_pmemPool = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":24155 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":24158 * pmemPool = int(memPool) * else: * pmemPool = int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * cdef void_ptr ptr = 0 */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_memPool}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24158, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24158, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":24159 * else: * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool # <<<<<<<<<<<<<< * cdef void_ptr ptr = 0 * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24159, __pyx_L1_error) __pyx_v_cymemPool = ((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24160 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * cdef void_ptr ptr = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMallocFromPoolAsync(&ptr, size, cymemPool, cystream) */ __pyx_v_ptr = 0; /* "cuda/bindings/runtime.pyx":24161 * cymemPool = pmemPool * cdef void_ptr ptr = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocFromPoolAsync(&ptr, size, cymemPool, cystream) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24162 * cdef void_ptr ptr = 0 * with nogil: * err = cyruntime.cudaMallocFromPoolAsync(&ptr, size, cymemPool, 
cystream) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMallocFromPoolAsync(((void **)(&__pyx_v_ptr)), __pyx_v_size, __pyx_v_cymemPool, __pyx_v_cystream); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24162, __pyx_L10_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":24161 * cymemPool = pmemPool * cdef void_ptr ptr = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMallocFromPoolAsync(&ptr, size, cymemPool, cystream) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L11; } __pyx_L10_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L11:; } } /* "cuda/bindings/runtime.pyx":24163 * with nogil: * err = cyruntime.cudaMallocFromPoolAsync(&ptr, size, cymemPool, cystream) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ptr) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24164 * err = cyruntime.cudaMallocFromPoolAsync(&ptr, size, cymemPool, cystream) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], ptr) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = 
PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24164, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, Py_None) != (0)) __PYX_ERR(0, 24164, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24163 * with nogil: * err = cyruntime.cudaMallocFromPoolAsync(&ptr, size, cymemPool, cystream) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ptr) */ } /* "cuda/bindings/runtime.pyx":24165 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ptr) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24165, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24165, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24165, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_ptr); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24165, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24165, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 24165, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3) != (0)) __PYX_ERR(0, 24165, __pyx_L1_error); __pyx_t_4 = 0; 
__pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24111 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMallocFromPoolAsync(size_t size, memPool, stream): * """ Allocates memory from a specified pool with stream ordered semantics. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMallocFromPoolAsync", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24167 * return (_dict_cudaError_t[err], ptr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolExportToShareableHandle(memPool, handleType not None : cudaMemAllocationHandleType, unsigned int flags): * """ Exports a memory pool to the requested handle type. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_317cudaMemPoolExportToShareableHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_316cudaMemPoolExportToShareableHandle, "cudaMemPoolExportToShareableHandle(memPool, handleType: cudaMemAllocationHandleType, unsigned int flags)\n\nExports a memory pool to the requested handle type.\n\nGiven an IPC capable mempool, create an OS handle to share the pool\nwith another process. A recipient process can convert the shareable\nhandle into a mempool with\n:py:obj:`~.cudaMemPoolImportFromShareableHandle`. Individual pointers\ncan then be shared with the :py:obj:`~.cudaMemPoolExportPointer` and\n:py:obj:`~.cudaMemPoolImportPointer` APIs. 
The implementation of what\nthe shareable handle is and how it can be transferred is defined by the\nrequested handle type.\n\nParameters\n----------\npool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n pool to export\nhandleType : :py:obj:`~.cudaMemAllocationHandleType`\n the type of handle to create\nflags : unsigned int\n must be 0\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorOutOfMemory`\nhandle_out : Any\n pointer to the location in which to store the requested handle\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolExportToShareableHandle`, :py:obj:`~.cudaMemPoolImportFromShareableHandle`, :py:obj:`~.cudaMemPoolExportPointer`, :py:obj:`~.cudaMemPoolImportPointer`\n\nNotes\n-----\n: To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than cudaMemHandleTypeNone."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_317cudaMemPoolExportToShareableHandle = {"cudaMemPoolExportToShareableHandle", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_317cudaMemPoolExportToShareableHandle, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_316cudaMemPoolExportToShareableHandle}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_317cudaMemPoolExportToShareableHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_memPool = 0; PyObject *__pyx_v_handleType = 0; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolExportToShareableHandle (wrapper)", 0); 
#if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_memPool,&__pyx_mstate_global->__pyx_n_u_handleType,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24167, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24167, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24167, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24167, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolExportToShareableHandle", 0) < (0)) __PYX_ERR(0, 24167, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolExportToShareableHandle", 1, 3, 3, i); __PYX_ERR(0, 24167, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24167, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24167, 
__pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24167, __pyx_L3_error) } __pyx_v_memPool = values[0]; __pyx_v_handleType = values[1]; __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24168, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolExportToShareableHandle", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24167, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolExportToShareableHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_handleType) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "handleType"); __PYX_ERR(0, 24168, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_316cudaMemPoolExportToShareableHandle(__pyx_self, __pyx_v_memPool, __pyx_v_handleType, __pyx_v_flags); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_316cudaMemPoolExportToShareableHandle(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_memPool, PyObject *__pyx_v_handleType, unsigned int __pyx_v_flags) { cudaMemPool_t 
__pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; struct __pyx_obj_4cuda_8bindings_7runtime__HelperCUmemAllocationHandleType *__pyx_v_cyshareableHandle = 0; void *__pyx_v_cyshareableHandle_ptr; enum cudaMemAllocationHandleType __pyx_v_cyhandleType; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; enum cudaMemAllocationHandleType __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolExportToShareableHandle", 0); /* "cuda/bindings/runtime.pyx":24205 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_1 = (__pyx_v_memPool == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24206 * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24205 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24207 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, 
__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24208 * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = int(cudaMemPool_t(memPool)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24208, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pmemPool = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":24207 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24210 * pmemPool = int(memPool) * else: * pmemPool = int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * cdef _HelperCUmemAllocationHandleType cyshareableHandle = _HelperCUmemAllocationHandleType(handleType) */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_memPool}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24210, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24210, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24211 * else: * pmemPool = int(cudaMemPool_t(memPool)) * 
cymemPool = pmemPool # <<<<<<<<<<<<<< * cdef _HelperCUmemAllocationHandleType cyshareableHandle = _HelperCUmemAllocationHandleType(handleType) * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24211, __pyx_L1_error) __pyx_v_cymemPool = ((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24212 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * cdef _HelperCUmemAllocationHandleType cyshareableHandle = _HelperCUmemAllocationHandleType(handleType) # <<<<<<<<<<<<<< * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmemAllocationHandleType); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperCUmemAllocationHandleType); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_handleType}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24212, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_cyshareableHandle = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperCUmemAllocationHandleType *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":24213 * cymemPool = pmemPool * cdef _HelperCUmemAllocationHandleType cyshareableHandle = _HelperCUmemAllocationHandleType(handleType) * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value * with nogil: */ __pyx_t_5 = 
__Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyshareableHandle), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24213, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyshareableHandle_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24214 * cdef _HelperCUmemAllocationHandleType cyshareableHandle = _HelperCUmemAllocationHandleType(handleType) * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolExportToShareableHandle(cyshareableHandle_ptr, cymemPool, cyhandleType, flags) */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_handleType, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24214, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaMemAllocationHandleType)__Pyx_PyLong_As_enum__cudaMemAllocationHandleType(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24214, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyhandleType = __pyx_t_8; /* "cuda/bindings/runtime.pyx":24215 * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolExportToShareableHandle(cyshareableHandle_ptr, cymemPool, cyhandleType, flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24216 * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value * with nogil: * err = 
cyruntime.cudaMemPoolExportToShareableHandle(cyshareableHandle_ptr, cymemPool, cyhandleType, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ /* NOTE(review): Cython-generated code (source: cuda/bindings/runtime.pyx). Do not hand-edit; change the .pyx and regenerate. */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolExportToShareableHandle(__pyx_v_cyshareableHandle_ptr, __pyx_v_cymemPool, __pyx_v_cyhandleType, __pyx_v_flags); /* cudaErrorCallRequiresNewerDriver appears to double as the exception sentinel here: it is treated as a Python error only when an exception is actually pending (checked under the GIL) -- TODO confirm against the cyruntime wrapper's convention. */ if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24216, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":24215 * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolExportToShareableHandle(cyshareableHandle_ptr, cymemPool, cyhandleType, flags) * if err != cyruntime.cudaSuccess: */ /* Exit of the `with nogil:` block: re-acquire the GIL on both the normal path (goto __pyx_L8) and the error path (goto __pyx_L1_error). */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":24217 * with nogil: * err = cyruntime.cudaMemPoolExportToShareableHandle(cyshareableHandle_ptr, cymemPool, cyhandleType, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cyshareableHandle.pyObj()) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24218 * err = cyruntime.cudaMemPoolExportToShareableHandle(cyshareableHandle_ptr, cymemPool, cyhandleType, flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], cyshareableHandle.pyObj()) * */ /* Failure path: build and return the 2-tuple (_dict_cudaError_t[err], None). */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = 
__Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24218, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 24218, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24217 * with nogil: * err = cyruntime.cudaMemPoolExportToShareableHandle(cyshareableHandle_ptr, cymemPool, cyhandleType, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cyshareableHandle.pyObj()) */ } /* "cuda/bindings/runtime.pyx":24219 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], cyshareableHandle.pyObj()) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = ((PyObject *)__pyx_v_cyshareableHandle); __Pyx_INCREF(__pyx_t_4); 
__pyx_t_6 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_4, NULL}; __pyx_t_3 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_pyObj, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 24219, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_3) != (0)) __PYX_ERR(0, 24219, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24167 * return (_dict_cudaError_t[err], ptr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolExportToShareableHandle(memPool, handleType not None : cudaMemAllocationHandleType, unsigned int flags): * """ Exports a memory pool to the requested handle type. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolExportToShareableHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XDECREF((PyObject *)__pyx_v_cyshareableHandle); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24221 * return (_dict_cudaError_t[err], cyshareableHandle.pyObj()) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolImportFromShareableHandle(shareableHandle, handleType not None : cudaMemAllocationHandleType, unsigned int flags): * """ imports a memory pool from a shared handle. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_319cudaMemPoolImportFromShareableHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_318cudaMemPoolImportFromShareableHandle, "cudaMemPoolImportFromShareableHandle(shareableHandle, handleType: cudaMemAllocationHandleType, unsigned int flags)\n\nimports a memory pool from a shared handle.\n\nSpecific allocations can be imported from the imported pool with\n:py:obj:`~.cudaMemPoolImportPointer`.\n\nParameters\n----------\nhandle : Any\n OS handle of the pool to open\nhandleType : :py:obj:`~.cudaMemAllocationHandleType`\n The type of handle being imported\nflags : unsigned int\n must be 0\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorOutOfMemory`\npool_out : :py:obj:`~.cudaMemPool_t`\n Returned memory pool\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolImportFromShareableHandle`, :py:obj:`~.cudaMemPoolExportToShareableHandle`, :py:obj:`~.cudaMemPoolExportPointer`, :py:obj:`~.cudaMemPoolImportPointer`\n\nNotes\n-----\nImported memory pools do not support creating new allocations. 
As such imported memory pools may not be used in :py:obj:`~.cudaDeviceSetMemPool` or :py:obj:`~.cudaMallocFromPoolAsync` calls."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_319cudaMemPoolImportFromShareableHandle = {"cudaMemPoolImportFromShareableHandle", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_319cudaMemPoolImportFromShareableHandle, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_318cudaMemPoolImportFromShareableHandle}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_319cudaMemPoolImportFromShareableHandle(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_shareableHandle = 0; PyObject *__pyx_v_handleType = 0; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolImportFromShareableHandle (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_shareableHandle,&__pyx_mstate_global->__pyx_n_u_handleType,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24221, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24221, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24221, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24221, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolImportFromShareableHandle", 0) < (0)) __PYX_ERR(0, 24221, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolImportFromShareableHandle", 1, 3, 3, i); __PYX_ERR(0, 24221, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24221, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24221, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24221, __pyx_L3_error) } __pyx_v_shareableHandle = values[0]; __pyx_v_handleType = values[1]; __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24222, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; 
__Pyx_RaiseArgtupleInvalid("cudaMemPoolImportFromShareableHandle", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24221, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolImportFromShareableHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_handleType) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "handleType"); __PYX_ERR(0, 24222, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_318cudaMemPoolImportFromShareableHandle(__pyx_self, __pyx_v_shareableHandle, __pyx_v_handleType, __pyx_v_flags); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_318cudaMemPoolImportFromShareableHandle(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_shareableHandle, PyObject *__pyx_v_handleType, unsigned int __pyx_v_flags) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPool_t *__pyx_v_memPool = 0; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyshareableHandle = NULL; void *__pyx_v_cyshareableHandle_ptr; enum cudaMemAllocationHandleType __pyx_v_cyhandleType; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; 
size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; enum cudaMemAllocationHandleType __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolImportFromShareableHandle", 0); /* "cuda/bindings/runtime.pyx":24252 * Imported memory pools do not support creating new allocations. As such imported memory pools may not be used in :py:obj:`~.cudaDeviceSetMemPool` or :py:obj:`~.cudaMallocFromPoolAsync` calls. * """ * cdef cudaMemPool_t memPool = cudaMemPool_t() # <<<<<<<<<<<<<< * cyshareableHandle = _HelperInputVoidPtr(shareableHandle) * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr */ /* NOTE(review): Cython-generated code (source: cuda/bindings/runtime.pyx). Do not hand-edit; change the .pyx and regenerate. */ /* Construct an empty cudaMemPool_t wrapper object (vectorcall of the type with no args) to receive the imported pool. */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24252, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_memPool = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPool_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":24253 * """ * cdef cudaMemPool_t memPool = cudaMemPool_t() * cyshareableHandle = _HelperInputVoidPtr(shareableHandle) # <<<<<<<<<<<<<< * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value */ /* Wrap the caller's shareableHandle in _HelperInputVoidPtr so its .cptr can be passed as void* to the runtime call. */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject 
*__pyx_callargs[2] = {__pyx_t_3, __pyx_v_shareableHandle}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24253, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyshareableHandle = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":24254 * cdef cudaMemPool_t memPool = cudaMemPool_t() * cyshareableHandle = _HelperInputVoidPtr(shareableHandle) * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyshareableHandle), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24254, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyshareableHandle_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":24255 * cyshareableHandle = _HelperInputVoidPtr(shareableHandle) * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolImportFromShareableHandle(memPool._pvt_ptr, cyshareableHandle_ptr, cyhandleType, flags) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_handleType, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_6 = ((enum 
cudaMemAllocationHandleType)__Pyx_PyLong_As_enum__cudaMemAllocationHandleType(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24255, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyhandleType = __pyx_t_6; /* "cuda/bindings/runtime.pyx":24256 * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolImportFromShareableHandle(memPool._pvt_ptr, cyshareableHandle_ptr, cyhandleType, flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24257 * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value * with nogil: * err = cyruntime.cudaMemPoolImportFromShareableHandle(memPool._pvt_ptr, cyshareableHandle_ptr, cyhandleType, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolImportFromShareableHandle(((cudaMemPool_t *)__pyx_v_memPool->__pyx_base._pvt_ptr), __pyx_v_cyshareableHandle_ptr, __pyx_v_cyhandleType, __pyx_v_flags); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24257, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":24256 * cdef void* cyshareableHandle_ptr = cyshareableHandle.cptr * cdef cyruntime.cudaMemAllocationHandleType cyhandleType = handleType.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolImportFromShareableHandle(memPool._pvt_ptr, cyshareableHandle_ptr, cyhandleType, flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":24258 * with 
nogil: * err = cyruntime.cudaMemPoolImportFromShareableHandle(memPool._pvt_ptr, cyshareableHandle_ptr, cyhandleType, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) */ __pyx_t_8 = (__pyx_v_err != cudaSuccess); if (__pyx_t_8) { /* "cuda/bindings/runtime.pyx":24259 * err = cyruntime.cudaMemPoolImportFromShareableHandle(memPool._pvt_ptr, cyshareableHandle_ptr, cyhandleType, flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], memPool) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24259, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None) != (0)) __PYX_ERR(0, 24259, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24258 * with nogil: * err = cyruntime.cudaMemPoolImportFromShareableHandle(memPool._pvt_ptr, cyshareableHandle_ptr, cyhandleType, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) */ } /* "cuda/bindings/runtime.pyx":24260 * if err != 
cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], memPool) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 24260, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_memPool); __Pyx_GIVEREF((PyObject *)__pyx_v_memPool); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_memPool)) != (0)) __PYX_ERR(0, 24260, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24221 * return (_dict_cudaError_t[err], cyshareableHandle.pyObj()) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolImportFromShareableHandle(shareableHandle, handleType not None : cudaMemAllocationHandleType, unsigned int flags): * """ imports a memory pool from a shared handle. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolImportFromShareableHandle", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memPool); __Pyx_XDECREF((PyObject *)__pyx_v_cyshareableHandle); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24262 * return (_dict_cudaError_t[err], memPool) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolExportPointer(ptr): * """ Export data to share a memory pool allocation between processes. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_321cudaMemPoolExportPointer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_320cudaMemPoolExportPointer, "cudaMemPoolExportPointer(ptr)\n\nExport data to share a memory pool allocation between processes.\n\nConstructs `shareData_out` for sharing a specific allocation from an\nalready shared memory pool. The recipient process can import the\nallocation with the :py:obj:`~.cudaMemPoolImportPointer` api. 
The data\nis not a handle and may be shared through any IPC mechanism.\n\nParameters\n----------\nptr : Any\n pointer to memory being exported\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorOutOfMemory`\nshareData_out : :py:obj:`~.cudaMemPoolPtrExportData`\n Returned export data\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolExportPointer`, :py:obj:`~.cudaMemPoolExportToShareableHandle`, :py:obj:`~.cudaMemPoolImportFromShareableHandle`, :py:obj:`~.cudaMemPoolImportPointer`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_321cudaMemPoolExportPointer = {"cudaMemPoolExportPointer", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_321cudaMemPoolExportPointer, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_320cudaMemPoolExportPointer}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_321cudaMemPoolExportPointer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_ptr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolExportPointer (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24262, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24262, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolExportPointer", 0) < (0)) __PYX_ERR(0, 24262, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolExportPointer", 1, 1, 1, i); __PYX_ERR(0, 24262, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24262, __pyx_L3_error) } __pyx_v_ptr = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolExportPointer", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24262, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolExportPointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_320cudaMemPoolExportPointer(__pyx_self, __pyx_v_ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_320cudaMemPoolExportPointer(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_ptr) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPoolPtrExportData *__pyx_v_exportData = 0; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyptr = NULL; void *__pyx_v_cyptr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolExportPointer", 0); /* "cuda/bindings/runtime.pyx":24287 * :py:obj:`~.cuMemPoolExportPointer`, :py:obj:`~.cudaMemPoolExportToShareableHandle`, :py:obj:`~.cudaMemPoolImportFromShareableHandle`, :py:obj:`~.cudaMemPoolImportPointer` * """ * cdef cudaMemPoolPtrExportData exportData = cudaMemPoolPtrExportData() # <<<<<<<<<<<<<< * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPoolPtrExportData); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPoolPtrExportData); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24287, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_exportData = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPoolPtrExportData *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":24288 * """ * cdef cudaMemPoolPtrExportData exportData = cudaMemPoolPtrExportData() * cyptr = 
_HelperInputVoidPtr(ptr) # <<<<<<<<<<<<<< * cdef void* cyptr_ptr = cyptr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_ptr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24288, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyptr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":24289 * cdef cudaMemPoolPtrExportData exportData = cudaMemPoolPtrExportData() * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolExportPointer(exportData._pvt_ptr, cyptr_ptr) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyptr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24289, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyptr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":24290 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolExportPointer(exportData._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24291 * cdef 
void* cyptr_ptr = cyptr.cptr * with nogil: * err = cyruntime.cudaMemPoolExportPointer(exportData._pvt_ptr, cyptr_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolExportPointer(((struct cudaMemPoolPtrExportData *)__pyx_v_exportData->_pvt_ptr), __pyx_v_cyptr_ptr); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24291, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":24290 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolExportPointer(exportData._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":24292 * with nogil: * err = cyruntime.cudaMemPoolExportPointer(exportData._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], exportData) */ __pyx_t_7 = (__pyx_v_err != cudaSuccess); if (__pyx_t_7) { /* "cuda/bindings/runtime.pyx":24293 * err = cyruntime.cudaMemPoolExportPointer(exportData._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], exportData) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24293, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24293, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 24293, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24293, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24293, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None) != (0)) __PYX_ERR(0, 24293, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24292 * with nogil: * err = cyruntime.cudaMemPoolExportPointer(exportData._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], exportData) */ } /* "cuda/bindings/runtime.pyx":24294 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], exportData) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 24294, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_exportData); __Pyx_GIVEREF((PyObject *)__pyx_v_exportData); if 
(__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_exportData)) != (0)) __PYX_ERR(0, 24294, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24262 * return (_dict_cudaError_t[err], memPool) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolExportPointer(ptr): * """ Export data to share a memory pool allocation between processes. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolExportPointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_exportData); __Pyx_XDECREF((PyObject *)__pyx_v_cyptr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24296 * return (_dict_cudaError_t[err], exportData) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolImportPointer(memPool, exportData : Optional[cudaMemPoolPtrExportData]): * """ Import a memory pool allocation from another process. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_323cudaMemPoolImportPointer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_322cudaMemPoolImportPointer, "cudaMemPoolImportPointer(memPool, cudaMemPoolPtrExportData exportData: Optional[cudaMemPoolPtrExportData])\n\nImport a memory pool allocation from another process.\n\nReturns in `ptr_out` a pointer to the imported memory. The imported\nmemory must not be accessed before the allocation operation completes\nin the exporting process. The imported memory must be freed from all\nimporting processes before being freed in the exporting process. 
The\npointer may be freed with cudaFree or cudaFreeAsync. If\n:py:obj:`~.cudaFreeAsync` is used, the free must be completed on the\nimporting process before the free operation on the exporting process.\n\nParameters\n----------\npool : :py:obj:`~.CUmemoryPool` or :py:obj:`~.cudaMemPool_t`\n pool from which to import\nshareData : :py:obj:`~.cudaMemPoolPtrExportData`\n data specifying the memory to import\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.CUDA_SUCCESS`, :py:obj:`~.CUDA_ERROR_INVALID_VALUE`, :py:obj:`~.CUDA_ERROR_NOT_INITIALIZED`, :py:obj:`~.CUDA_ERROR_OUT_OF_MEMORY`\nptr_out : Any\n pointer to imported memory\n\nSee Also\n--------\n:py:obj:`~.cuMemPoolImportPointer`, :py:obj:`~.cudaMemPoolExportToShareableHandle`, :py:obj:`~.cudaMemPoolImportFromShareableHandle`, :py:obj:`~.cudaMemPoolExportPointer`\n\nNotes\n-----\nThe :py:obj:`~.cudaFreeAsync` api may be used in the exporting process before the :py:obj:`~.cudaFreeAsync` operation completes in its stream as long as the :py:obj:`~.cudaFreeAsync` in the exporting process specifies a stream with a stream dependency on the importing process's :py:obj:`~.cudaFreeAsync`."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_323cudaMemPoolImportPointer = {"cudaMemPoolImportPointer", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_323cudaMemPoolImportPointer, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_322cudaMemPoolImportPointer}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_323cudaMemPoolImportPointer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_memPool = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPoolPtrExportData *__pyx_v_exportData = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; 
PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaMemPoolImportPointer (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_memPool,&__pyx_mstate_global->__pyx_n_u_exportData,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24296, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24296, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24296, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaMemPoolImportPointer", 0) < (0)) __PYX_ERR(0, 24296, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaMemPoolImportPointer", 1, 2, 2, i); __PYX_ERR(0, 24296, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24296, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24296, 
__pyx_L3_error) } __pyx_v_memPool = values[0]; __pyx_v_exportData = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPoolPtrExportData *)values[1]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaMemPoolImportPointer", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24296, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolImportPointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_exportData), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPoolPtrExportData, 1, "exportData", 0))) __PYX_ERR(0, 24297, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_322cudaMemPoolImportPointer(__pyx_self, __pyx_v_memPool, __pyx_v_exportData); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_322cudaMemPoolImportPointer(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_memPool, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemPoolPtrExportData *__pyx_v_exportData) { cudaMemPool_t __pyx_v_cymemPool; PyObject *__pyx_v_pmemPool = NULL; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_ptr; struct cudaMemPoolPtrExportData *__pyx_v_cyexportData_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; struct cudaMemPoolPtrExportData *__pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaMemPoolImportPointer", 0); /* "cuda/bindings/runtime.pyx":24331 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ __pyx_t_1 = (__pyx_v_memPool == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24332 * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: * pmemPool = 0 # <<<<<<<<<<<<<< * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pmemPool = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24331 * """ * cdef cyruntime.cudaMemPool_t cymemPool * if memPool is None: # <<<<<<<<<<<<<< * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24333 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_memPool, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUmemoryPool); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24334 * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): * pmemPool = int(memPool) # <<<<<<<<<<<<<< * else: * pmemPool = 
int(cudaMemPool_t(memPool)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_memPool); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24334, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pmemPool = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":24333 * if memPool is None: * pmemPool = 0 * elif isinstance(memPool, (cudaMemPool_t,driver.CUmemoryPool)): # <<<<<<<<<<<<<< * pmemPool = int(memPool) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24336 * pmemPool = int(memPool) * else: * pmemPool = int(cudaMemPool_t(memPool)) # <<<<<<<<<<<<<< * cymemPool = pmemPool * cdef void_ptr ptr = 0 */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemPool_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_memPool}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24336, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24336, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pmemPool = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24337 * else: * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool # <<<<<<<<<<<<<< * cdef void_ptr ptr = 0 * cdef cyruntime.cudaMemPoolPtrExportData* cyexportData_ptr = exportData._pvt_ptr if exportData is not None else NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pmemPool); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24337, __pyx_L1_error) __pyx_v_cymemPool = 
((cudaMemPool_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24338 * pmemPool = int(cudaMemPool_t(memPool)) * cymemPool = pmemPool * cdef void_ptr ptr = 0 # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemPoolPtrExportData* cyexportData_ptr = exportData._pvt_ptr if exportData is not None else NULL * with nogil: */ __pyx_v_ptr = 0; /* "cuda/bindings/runtime.pyx":24339 * cymemPool = pmemPool * cdef void_ptr ptr = 0 * cdef cyruntime.cudaMemPoolPtrExportData* cyexportData_ptr = exportData._pvt_ptr if exportData is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaMemPoolImportPointer(&ptr, cymemPool, cyexportData_ptr) */ __pyx_t_1 = (((PyObject *)__pyx_v_exportData) != Py_None); if (__pyx_t_1) { __pyx_t_8 = __pyx_v_exportData->_pvt_ptr; } else { __pyx_t_8 = NULL; } __pyx_v_cyexportData_ptr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":24340 * cdef void_ptr ptr = 0 * cdef cyruntime.cudaMemPoolPtrExportData* cyexportData_ptr = exportData._pvt_ptr if exportData is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolImportPointer(&ptr, cymemPool, cyexportData_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24341 * cdef cyruntime.cudaMemPoolPtrExportData* cyexportData_ptr = exportData._pvt_ptr if exportData is not None else NULL * with nogil: * err = cyruntime.cudaMemPoolImportPointer(&ptr, cymemPool, cyexportData_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaMemPoolImportPointer(((void **)(&__pyx_v_ptr)), __pyx_v_cymemPool, __pyx_v_cyexportData_ptr); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24341, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":24340 * cdef void_ptr ptr 
= 0 * cdef cyruntime.cudaMemPoolPtrExportData* cyexportData_ptr = exportData._pvt_ptr if exportData is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaMemPoolImportPointer(&ptr, cymemPool, cyexportData_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":24342 * with nogil: * err = cyruntime.cudaMemPoolImportPointer(&ptr, cymemPool, cyexportData_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ptr) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24343 * err = cyruntime.cudaMemPoolImportPointer(&ptr, cymemPool, cyexportData_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], ptr) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24343, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24343, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24343, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24343, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 24343, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 24343, __pyx_L1_error); __pyx_t_4 = 0; 
__pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24342 * with nogil: * err = cyruntime.cudaMemPoolImportPointer(&ptr, cymemPool, cyexportData_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ptr) */ } /* "cuda/bindings/runtime.pyx":24344 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], ptr) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24344, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24344, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24344, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_ptr); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24344, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24344, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 24344, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4) != (0)) __PYX_ERR(0, 24344, __pyx_L1_error); __pyx_t_5 = 0; __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24296 * return (_dict_cudaError_t[err], exportData) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaMemPoolImportPointer(memPool, exportData : Optional[cudaMemPoolPtrExportData]): * """ Import a memory pool allocation from another process. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaMemPoolImportPointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pmemPool); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24346 * return (_dict_cudaError_t[err], ptr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaPointerGetAttributes(ptr): * """ Returns attributes about a specified pointer. */ /* Python wrapper */
/* NOTE(review): Cython-generated C (from cuda/bindings/runtime.pyx) -- do not
 * hand-edit; fix the .pyx source and regenerate instead.
 * CPython entry point for cudaPointerGetAttributes(ptr): validates that
 * exactly one argument ('ptr', positional or keyword) was supplied, then
 * delegates to the __pyx_pf_..._324cudaPointerGetAttributes implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_325cudaPointerGetAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_324cudaPointerGetAttributes, "cudaPointerGetAttributes(ptr)\n\nReturns attributes about a specified pointer.\n\nReturns in `*attributes` the attributes of the pointer `ptr`. If\npointer was not allocated in, mapped by or registered with context\nsupporting unified addressing :py:obj:`~.cudaErrorInvalidValue` is\nreturned.\n\nThe :py:obj:`~.cudaPointerAttributes` structure is defined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nIn this structure, the individual fields mean\n\n- :py:obj:`~.cudaPointerAttributes.type` identifies type of memory. It\n can be :py:obj:`~.cudaMemoryTypeUnregistered` for unregistered host\n memory, :py:obj:`~.cudaMemoryTypeHost` for registered host memory,\n :py:obj:`~.cudaMemoryTypeDevice` for device memory or\n :py:obj:`~.cudaMemoryTypeManaged` for managed memory.\n\n- :py:obj:`~.device` is the device against which `ptr` was allocated.\n If `ptr` has memory type :py:obj:`~.cudaMemoryTypeDevice` then this\n identifies the device on which the memory referred to by `ptr`\n physically resides. 
If `ptr` has memory type\n :py:obj:`~.cudaMemoryTypeHost` then this identifies the device which\n was current when the allocation was made (and if that device is\n deinitialized then this allocation will vanish with that device's\n state).\n\n- :py:obj:`~.devicePointer` is the device pointer alias through which\n the memory referred to by `ptr` may be accessed on the current\n device. If the memory referred to by `ptr` cannot be accessed\n directly by the current device then this is NULL.\n\n- :py:obj:`~.hostPointer` is the host pointer alias through which the\n memory referred to by `ptr` may be accessed on the host. If the\n memory referred to by `ptr` cannot be accessed directly by the host\n then this is NULL.\n\nParameters\n----------\nptr : Any\n Pointer to get attributes for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorInvalidValue`\nattributes : :py:obj:`~.cud""aPointerAttributes`\n Attributes for the specified pointer\n\nSee Also\n--------\n:py:obj:`~.cudaGetDeviceCount`, :py:obj:`~.cudaGetDevice`, :py:obj:`~.cudaSetDevice`, :py:obj:`~.cudaChooseDevice`, :py:obj:`~.cudaInitDevice`, :py:obj:`~.cuPointerGetAttributes`\n\nNotes\n-----\nIn CUDA 11.0 forward passing host pointer will return :py:obj:`~.cudaMemoryTypeUnregistered` in :py:obj:`~.cudaPointerAttributes.type` and call will return :py:obj:`~.cudaSuccess`."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_325cudaPointerGetAttributes = {"cudaPointerGetAttributes", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_325cudaPointerGetAttributes, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_324cudaPointerGetAttributes}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_325cudaPointerGetAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject 
*__pyx_kwds #endif ) { PyObject *__pyx_v_ptr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaPointerGetAttributes (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_ptr,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24346, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24346, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaPointerGetAttributes", 0) < (0)) __PYX_ERR(0, 24346, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaPointerGetAttributes", 1, 1, 1, i); __PYX_ERR(0, 24346, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24346, __pyx_L3_error) } __pyx_v_ptr = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaPointerGetAttributes", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24346, 
__pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaPointerGetAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_324cudaPointerGetAttributes(__pyx_self, __pyx_v_ptr); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of cudaPointerGetAttributes(ptr): constructs a Python-level
 * cudaPointerAttributes object, wraps 'ptr' in _HelperInputVoidPtr and reads
 * its .cptr as the raw void*, invokes the cyruntime call with the GIL
 * released (nogil block), and returns a 2-tuple: (_dict_cudaError_t[err],
 * attributes) on success, or (_dict_cudaError_t[err], None) on failure. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_324cudaPointerGetAttributes(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_ptr) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaPointerAttributes *__pyx_v_attributes = 0; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cyptr = NULL; void *__pyx_v_cyptr_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_5; cudaError_t __pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaPointerGetAttributes", 0); /* "cuda/bindings/runtime.pyx":24406 * In CUDA 11.0 forward passing host pointer will return :py:obj:`~.cudaMemoryTypeUnregistered` in :py:obj:`~.cudaPointerAttributes.type` and call will return :py:obj:`~.cudaSuccess`. 
* """ * cdef cudaPointerAttributes attributes = cudaPointerAttributes() # <<<<<<<<<<<<<< * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaPointerAttributes); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaPointerAttributes); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24406, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_attributes = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaPointerAttributes *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":24407 * """ * cdef cudaPointerAttributes attributes = cudaPointerAttributes() * cyptr = _HelperInputVoidPtr(ptr) # <<<<<<<<<<<<<< * cdef void* cyptr_ptr = cyptr.cptr * with nogil: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_2 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_ptr}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_2, __pyx_callargs+__pyx_t_4, (2-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24407, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cyptr = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":24408 * cdef cudaPointerAttributes attributes = cudaPointerAttributes() * cyptr = _HelperInputVoidPtr(ptr) * cdef void* 
cyptr_ptr = cyptr.cptr # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaPointerGetAttributes(attributes._pvt_ptr, cyptr_ptr) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cyptr), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_5 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24408, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyptr_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_5)); /* "cuda/bindings/runtime.pyx":24409 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaPointerGetAttributes(attributes._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24410 * cdef void* cyptr_ptr = cyptr.cptr * with nogil: * err = cyruntime.cudaPointerGetAttributes(attributes._pvt_ptr, cyptr_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_6 = __pyx_f_4cuda_8bindings_9cyruntime_cudaPointerGetAttributes(((struct cudaPointerAttributes *)__pyx_v_attributes->_pvt_ptr), __pyx_v_cyptr_ptr); if (unlikely(__pyx_t_6 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24410, __pyx_L4_error) __pyx_v_err = __pyx_t_6; } /* "cuda/bindings/runtime.pyx":24409 * cyptr = _HelperInputVoidPtr(ptr) * cdef void* cyptr_ptr = cyptr.cptr * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaPointerGetAttributes(attributes._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* 
"cuda/bindings/runtime.pyx":24411 * with nogil: * err = cyruntime.cudaPointerGetAttributes(attributes._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], attributes) */ __pyx_t_7 = (__pyx_v_err != cudaSuccess); if (__pyx_t_7) { /* "cuda/bindings/runtime.pyx":24412 * err = cyruntime.cudaPointerGetAttributes(attributes._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], attributes) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24412, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24412, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None) != (0)) __PYX_ERR(0, 24412, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24411 * with nogil: * err = cyruntime.cudaPointerGetAttributes(attributes._pvt_ptr, cyptr_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], attributes) */ } /* "cuda/bindings/runtime.pyx":24413 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return 
(_dict_cudaError_t[err], attributes) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 24413, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_attributes); __Pyx_GIVEREF((PyObject *)__pyx_v_attributes); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_attributes)) != (0)) __PYX_ERR(0, 24413, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24346 * return (_dict_cudaError_t[err], ptr) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaPointerGetAttributes(ptr): * """ Returns attributes about a specified pointer. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaPointerGetAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_attributes); __Pyx_XDECREF((PyObject *)__pyx_v_cyptr); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24415 * return (_dict_cudaError_t[err], attributes) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceCanAccessPeer(int device, int peerDevice): * """ Queries if a device may directly access a peer device's memory. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_327cudaDeviceCanAccessPeer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_326cudaDeviceCanAccessPeer, "cudaDeviceCanAccessPeer(int device, int peerDevice)\n\nQueries if a device may directly access a peer device's memory.\n\nReturns in `*canAccessPeer` a value of 1 if device `device` is capable\nof directly accessing memory from `peerDevice` and 0 otherwise. 
If\ndirect access of `peerDevice` from `device` is possible, then access\nmay be enabled by calling :py:obj:`~.cudaDeviceEnablePeerAccess()`.\n\nParameters\n----------\ndevice : int\n Device from which allocations on `peerDevice` are to be directly\n accessed.\npeerDevice : int\n Device on which the allocations to be directly accessed by `device`\n reside.\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`\ncanAccessPeer : int\n Returned access capability\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cudaDeviceDisablePeerAccess`, :py:obj:`~.cuDeviceCanAccessPeer`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_327cudaDeviceCanAccessPeer = {"cudaDeviceCanAccessPeer", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_327cudaDeviceCanAccessPeer, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_326cudaDeviceCanAccessPeer};
/* NOTE(review): Cython-generated code -- do not hand-edit.
 * CPython entry point for cudaDeviceCanAccessPeer(device, peerDevice):
 * parses exactly two arguments, converts both to C int, and delegates to
 * the __pyx_pf_..._326cudaDeviceCanAccessPeer implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_327cudaDeviceCanAccessPeer(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_device; int __pyx_v_peerDevice; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceCanAccessPeer (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = 
{&__pyx_mstate_global->__pyx_n_u_device_2,&__pyx_mstate_global->__pyx_n_u_peerDevice,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24415, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24415, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24415, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceCanAccessPeer", 0) < (0)) __PYX_ERR(0, 24415, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceCanAccessPeer", 1, 2, 2, i); __PYX_ERR(0, 24415, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24415, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24415, __pyx_L3_error) } __pyx_v_device = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_device == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24416, __pyx_L3_error) __pyx_v_peerDevice = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_peerDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24416, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceCanAccessPeer", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24415, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; 
__pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceCanAccessPeer", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_326cudaDeviceCanAccessPeer(__pyx_self, __pyx_v_device, __pyx_v_peerDevice); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of cudaDeviceCanAccessPeer(device, peerDevice): calls the
 * cyruntime binding into a local int with the GIL released, then returns
 * (_dict_cudaError_t[err], canAccessPeer) on success or
 * (_dict_cudaError_t[err], None) when err != cudaSuccess. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_326cudaDeviceCanAccessPeer(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_device, int __pyx_v_peerDevice) { int __pyx_v_canAccessPeer; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceCanAccessPeer", 0); /* "cuda/bindings/runtime.pyx":24444 * :py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cudaDeviceDisablePeerAccess`, :py:obj:`~.cuDeviceCanAccessPeer` * """ * cdef int canAccessPeer = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDeviceCanAccessPeer(&canAccessPeer, device, peerDevice) */ __pyx_v_canAccessPeer = 0; /* "cuda/bindings/runtime.pyx":24445 * """ * cdef int canAccessPeer = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceCanAccessPeer(&canAccessPeer, device, peerDevice) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24446 * cdef int canAccessPeer = 0 * with nogil: * err = cyruntime.cudaDeviceCanAccessPeer(&canAccessPeer, device, 
peerDevice) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceCanAccessPeer((&__pyx_v_canAccessPeer), __pyx_v_device, __pyx_v_peerDevice); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24446, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":24445 * """ * cdef int canAccessPeer = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceCanAccessPeer(&canAccessPeer, device, peerDevice) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":24447 * with nogil: * err = cyruntime.cudaDeviceCanAccessPeer(&canAccessPeer, device, peerDevice) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], canAccessPeer) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":24448 * err = cyruntime.cudaDeviceCanAccessPeer(&canAccessPeer, device, peerDevice) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], canAccessPeer) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = 
PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 24448, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 24448, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24447 * with nogil: * err = cyruntime.cudaDeviceCanAccessPeer(&canAccessPeer, device, peerDevice) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], canAccessPeer) */ } /* "cuda/bindings/runtime.pyx":24449 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], canAccessPeer) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_int(__pyx_v_canAccessPeer); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24449, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24449, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 24449, __pyx_L1_error); 
__pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24415 * return (_dict_cudaError_t[err], attributes) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceCanAccessPeer(int device, int peerDevice): * """ Queries if a device may directly access a peer device's memory. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceCanAccessPeer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24451 * return (_dict_cudaError_t[err], canAccessPeer) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags): * """ Enables direct access to memory allocations on a peer device. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_329cudaDeviceEnablePeerAccess(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_328cudaDeviceEnablePeerAccess, "cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags)\n\nEnables direct access to memory allocations on a peer device.\n\nOn success, all allocations from `peerDevice` will immediately be\naccessible by the current device. 
They will remain accessible until\naccess is explicitly disabled using\n:py:obj:`~.cudaDeviceDisablePeerAccess()` or either device is reset\nusing :py:obj:`~.cudaDeviceReset()`.\n\nNote that access granted by this call is unidirectional and that in\norder to access memory on the current device from `peerDevice`, a\nseparate symmetric call to :py:obj:`~.cudaDeviceEnablePeerAccess()` is\nrequired.\n\nNote that there are both device-wide and system-wide limitations per\nsystem configuration, as noted in the CUDA Programming Guide under the\nsection \"Peer-to-Peer Memory Access\".\n\nReturns :py:obj:`~.cudaErrorInvalidDevice` if\n:py:obj:`~.cudaDeviceCanAccessPeer()` indicates that the current device\ncannot directly access memory from `peerDevice`.\n\nReturns :py:obj:`~.cudaErrorPeerAccessAlreadyEnabled` if direct access\nof `peerDevice` from the current device has already been enabled.\n\nReturns :py:obj:`~.cudaErrorInvalidValue` if `flags` is not 0.\n\nParameters\n----------\npeerDevice : int\n Peer device to enable direct access to from the current device\nflags : unsigned int\n Reserved for future use and must be set to 0\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidDevice`, :py:obj:`~.cudaErrorPeerAccessAlreadyEnabled`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cudaDeviceDisablePeerAccess`, :py:obj:`~.cuCtxEnablePeerAccess`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_329cudaDeviceEnablePeerAccess = {"cudaDeviceEnablePeerAccess", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_329cudaDeviceEnablePeerAccess, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_328cudaDeviceEnablePeerAccess};
/* NOTE(review): Cython-generated code -- do not hand-edit.
 * CPython entry point for cudaDeviceEnablePeerAccess(peerDevice, flags):
 * parses exactly two arguments (int peerDevice, unsigned int flags) and
 * delegates to the __pyx_pf_..._328cudaDeviceEnablePeerAccess implementation. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_329cudaDeviceEnablePeerAccess(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject 
*__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_peerDevice; unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceEnablePeerAccess (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_peerDevice,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24451, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24451, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24451, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceEnablePeerAccess", 0) < (0)) __PYX_ERR(0, 24451, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceEnablePeerAccess", 1, 2, 2, i); __PYX_ERR(0, 24451, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = 
__Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24451, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24451, __pyx_L3_error) } __pyx_v_peerDevice = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_peerDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24452, __pyx_L3_error) __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24452, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceEnablePeerAccess", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24451, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceEnablePeerAccess", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_328cudaDeviceEnablePeerAccess(__pyx_self, __pyx_v_peerDevice, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of cudaDeviceEnablePeerAccess(peerDevice, flags): invokes
 * the cyruntime binding with the GIL released and returns a 1-tuple
 * (_dict_cudaError_t[err],) -- no output value beyond the status. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_328cudaDeviceEnablePeerAccess(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_peerDevice, unsigned int __pyx_v_flags) { cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceEnablePeerAccess", 
0); /* "cuda/bindings/runtime.pyx":24495 * :py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cudaDeviceDisablePeerAccess`, :py:obj:`~.cuCtxEnablePeerAccess` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceEnablePeerAccess(peerDevice, flags) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24496 * """ * with nogil: * err = cyruntime.cudaDeviceEnablePeerAccess(peerDevice, flags) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceEnablePeerAccess(__pyx_v_peerDevice, __pyx_v_flags); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24496, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":24495 * :py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cudaDeviceDisablePeerAccess`, :py:obj:`~.cuCtxEnablePeerAccess` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceEnablePeerAccess(peerDevice, flags) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":24497 * with nogil: * err = cyruntime.cudaDeviceEnablePeerAccess(peerDevice, flags) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24497, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 24497, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24451 * return (_dict_cudaError_t[err], canAccessPeer) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceEnablePeerAccess(int peerDevice, unsigned int flags): * """ Enables direct access to memory allocations on a peer device. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceEnablePeerAccess", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24499 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceDisablePeerAccess(int peerDevice): * """ Disables direct access to memory allocations on a peer device. 
*/
/* NOTE(review): Cython-generated wrapper + implementation for
 * cudaDeviceDisablePeerAccess (runtime.pyx:24499) — do not hand-edit; regenerate
 * from the .pyx source. Preprocessor directives have been restored to their own
 * lines; all code tokens are unchanged. */
/* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_331cudaDeviceDisablePeerAccess(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_330cudaDeviceDisablePeerAccess, "cudaDeviceDisablePeerAccess(int peerDevice)\n\nDisables direct access to memory allocations on a peer device.\n\nReturns :py:obj:`~.cudaErrorPeerAccessNotEnabled` if direct access to\nmemory on `peerDevice` has not yet been enabled from the current\ndevice.\n\nParameters\n----------\npeerDevice : int\n Peer device to disable direct access to\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorPeerAccessNotEnabled`, :py:obj:`~.cudaErrorInvalidDevice`\n\nSee Also\n--------\n:py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cuCtxDisablePeerAccess`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_331cudaDeviceDisablePeerAccess = {"cudaDeviceDisablePeerAccess", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_331cudaDeviceDisablePeerAccess, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_330cudaDeviceDisablePeerAccess}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_331cudaDeviceDisablePeerAccess(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { int __pyx_v_peerDevice;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDeviceDisablePeerAccess (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_peerDevice,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24499, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24499, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDeviceDisablePeerAccess", 0) < (0)) __PYX_ERR(0, 24499, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDeviceDisablePeerAccess", 1, 1, 1, i); __PYX_ERR(0, 24499, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24499, __pyx_L3_error) } __pyx_v_peerDevice = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_peerDevice == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24500, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDeviceDisablePeerAccess", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24499, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceDisablePeerAccess", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_330cudaDeviceDisablePeerAccess(__pyx_self, __pyx_v_peerDevice); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation: releases the GIL (with nogil), calls
 * cyruntime.cudaDeviceDisablePeerAccess(peerDevice), then returns the 1-tuple
 * (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_330cudaDeviceDisablePeerAccess(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_peerDevice) { cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDeviceDisablePeerAccess", 0); /* "cuda/bindings/runtime.pyx":24521 * :py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cuCtxDisablePeerAccess` * """ * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDeviceDisablePeerAccess(peerDevice) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24522 * """ * with nogil: * err = cyruntime.cudaDeviceDisablePeerAccess(peerDevice) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDeviceDisablePeerAccess(__pyx_v_peerDevice); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24522, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":24521 * :py:obj:`~.cudaDeviceCanAccessPeer`, :py:obj:`~.cudaDeviceEnablePeerAccess`, :py:obj:`~.cuCtxDisablePeerAccess` * """ * with nogil: # <<<<<<<<<<<<<< * err
= cyruntime.cudaDeviceDisablePeerAccess(peerDevice) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":24523 * with nogil: * err = cyruntime.cudaDeviceDisablePeerAccess(peerDevice) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24523, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24523, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24523, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24523, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 24523, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24499 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDeviceDisablePeerAccess(int peerDevice): * """ Disables direct access to memory allocations on a peer device.
*/
/* NOTE(review): Cython-generated code — do not hand-edit; regenerate from
 * cuda/bindings/runtime.pyx. This span holds the exit path of the
 * cudaDeviceDisablePeerAccess implementation, then the full wrapper and
 * implementation for cudaGraphicsUnregisterResource (runtime.pyx:24525).
 * Preprocessor directives restored to their own lines; code tokens unchanged. */
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDeviceDisablePeerAccess", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24525 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsUnregisterResource(resource): * """ Unregisters a graphics resource for access by CUDA. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_333cudaGraphicsUnregisterResource(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_332cudaGraphicsUnregisterResource, "cudaGraphicsUnregisterResource(resource)\n\nUnregisters a graphics resource for access by CUDA.\n\nUnregisters the graphics resource `resource` so it is not accessible by\nCUDA unless registered again.\n\nIf `resource` is invalid then\n:py:obj:`~.cudaErrorInvalidResourceHandle` is returned.\n\nParameters\n----------\nresource : :py:obj:`~.cudaGraphicsResource_t`\n Resource to unregister\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorUnknown`\n\nSee Also\n--------\n:py:obj:`~.cudaGraphicsD3D9RegisterResource`, :py:obj:`~.cudaGraphicsD3D10RegisterResource`, :py:obj:`~.cudaGraphicsD3D11RegisterResource`, :py:obj:`~.cudaGraphicsGLRegisterBuffer`, :py:obj:`~.cudaGraphicsGLRegisterImage`, :py:obj:`~.cuGraphicsUnregisterResource`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_333cudaGraphicsUnregisterResource = {"cudaGraphicsUnregisterResource",
(PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_333cudaGraphicsUnregisterResource, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_332cudaGraphicsUnregisterResource}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_333cudaGraphicsUnregisterResource(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_resource = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphicsUnregisterResource (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_resource,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ?
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24525, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24525, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphicsUnregisterResource", 0) < (0)) __PYX_ERR(0, 24525, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphicsUnregisterResource", 1, 1, 1, i); __PYX_ERR(0, 24525, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24525, __pyx_L3_error) } __pyx_v_resource = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphicsUnregisterResource", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24525, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsUnregisterResource", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_332cudaGraphicsUnregisterResource(__pyx_self, __pyx_v_resource); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation: coerces `resource` (None / cudaGraphicsResource_t / castable int)
 * to a raw handle, releases the GIL, calls
 * cyruntime.cudaGraphicsUnregisterResource(cyresource), and returns
 * (_dict_cudaError_t[err],). */
static PyObject
*__pyx_pf_4cuda_8bindings_7runtime_332cudaGraphicsUnregisterResource(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_resource) { cudaGraphicsResource_t __pyx_v_cyresource; PyObject *__pyx_v_presource = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphicsUnregisterResource", 0); /* "cuda/bindings/runtime.pyx":24550 * """ * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: # <<<<<<<<<<<<<< * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): */ __pyx_t_1 = (__pyx_v_resource == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24551 * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: * presource = 0 # <<<<<<<<<<<<<< * elif isinstance(resource, (cudaGraphicsResource_t,)): * presource = int(resource) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_presource = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24550 * """ * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: # <<<<<<<<<<<<<< * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24552 * if resource is None: * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presource = int(resource) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_resource, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24553 * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): * presource = int(resource) # <<<<<<<<<<<<<< * else: * presource =
int(cudaGraphicsResource_t(resource)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_resource); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24553, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_presource = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":24552 * if resource is None: * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presource = int(resource) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24555 * presource = int(resource) * else: * presource = int(cudaGraphicsResource_t(resource)) # <<<<<<<<<<<<<< * cyresource = presource * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_resource}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24555, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24555, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_presource = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24556 * else: * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphicsUnregisterResource(cyresource) */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_presource); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24556, __pyx_L1_error) __pyx_v_cyresource =
((cudaGraphicsResource_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":24557 * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsUnregisterResource(cyresource) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24558 * cyresource = presource * with nogil: * err = cyruntime.cudaGraphicsUnregisterResource(cyresource) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphicsUnregisterResource(__pyx_v_cyresource); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24558, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":24557 * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsUnregisterResource(cyresource) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":24559 * with nogil: * err = cyruntime.cudaGraphicsUnregisterResource(cyresource) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24559, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24559, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24525 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsUnregisterResource(resource): * """ Unregisters a graphics resource for access by CUDA. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsUnregisterResource", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_presource); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24561 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsResourceSetMapFlags(resource, unsigned int flags): * """ Set usage flags for mapping a graphics resource.
*/
/* NOTE(review): Cython-generated wrapper + implementation for
 * cudaGraphicsResourceSetMapFlags (runtime.pyx:24561) — do not hand-edit;
 * regenerate from the .pyx source. Preprocessor directives restored to their own
 * lines, and the PyDoc_STRVAR docstring literal rejoined onto one line (a raw
 * newline inside a string literal does not compile); code tokens unchanged. */
/* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_335cudaGraphicsResourceSetMapFlags(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_334cudaGraphicsResourceSetMapFlags, "cudaGraphicsResourceSetMapFlags(resource, unsigned int flags)\n\nSet usage flags for mapping a graphics resource.\n\nSet `flags` for mapping the graphics resource `resource`.\n\nChanges to `flags` will take effect the next time `resource` is mapped.\nThe `flags` argument may be any of the following:\n\n- :py:obj:`~.cudaGraphicsMapFlagsNone`: Specifies no hints about how\n  `resource` will be used. It is therefore assumed that CUDA may read\n  from or write to `resource`.\n\n- :py:obj:`~.cudaGraphicsMapFlagsReadOnly`: Specifies that CUDA will\n  not write to `resource`.\n\n- :py:obj:`~.cudaGraphicsMapFlagsWriteDiscard`: Specifies CUDA will not\n  read from `resource` and will write over the entire contents of\n  `resource`, so none of the data previously stored in `resource` will\n  be preserved.\n\nIf `resource` is presently mapped for access by CUDA then\n:py:obj:`~.cudaErrorUnknown` is returned. If `flags` is not one of the\nabove values then :py:obj:`~.cudaErrorInvalidValue` is returned.\n\nParameters\n----------\nresource : :py:obj:`~.cudaGraphicsResource_t`\n Registered resource to set flags for\nflags : unsigned int\n Parameters for resource mapping\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorUnknown`,\n\nSee Also\n--------\n:py:obj:`~.cudaGraphicsMapResources`, :py:obj:`~.cuGraphicsResourceSetMapFlags`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_335cudaGraphicsResourceSetMapFlags = {"cudaGraphicsResourceSetMapFlags", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_335cudaGraphicsResourceSetMapFlags, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_334cudaGraphicsResourceSetMapFlags}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_335cudaGraphicsResourceSetMapFlags(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_resource = 0; unsigned int __pyx_v_flags;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphicsResourceSetMapFlags (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_resource,&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const
Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24561, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24561, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24561, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphicsResourceSetMapFlags", 0) < (0)) __PYX_ERR(0, 24561, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphicsResourceSetMapFlags", 1, 2, 2, i); __PYX_ERR(0, 24561, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24561, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24561, __pyx_L3_error) } __pyx_v_resource = values[0]; __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24562, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphicsResourceSetMapFlags", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 24561, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsResourceSetMapFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_334cudaGraphicsResourceSetMapFlags(__pyx_self, __pyx_v_resource, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation: coerces `resource` (None / cudaGraphicsResource_t / castable int)
 * to a raw handle, releases the GIL, calls
 * cyruntime.cudaGraphicsResourceSetMapFlags(cyresource, flags), and returns
 * (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_334cudaGraphicsResourceSetMapFlags(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_resource, unsigned int __pyx_v_flags) { cudaGraphicsResource_t __pyx_v_cyresource; PyObject *__pyx_v_presource = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphicsResourceSetMapFlags", 0); /* "cuda/bindings/runtime.pyx":24603 * """ * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: # <<<<<<<<<<<<<< * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): */ __pyx_t_1 = (__pyx_v_resource == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24604 * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: * presource = 0 # <<<<<<<<<<<<<< * elif isinstance(resource, (cudaGraphicsResource_t,)): * presource = int(resource) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_presource = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24603 * """ * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: # <<<<<<<<<<<<<< * presource = 0 * elif isinstance(resource,
(cudaGraphicsResource_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24605 * if resource is None: * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presource = int(resource) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_resource, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24606 * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): * presource = int(resource) # <<<<<<<<<<<<<< * else: * presource = int(cudaGraphicsResource_t(resource)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_resource); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24606, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_presource = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":24605 * if resource is None: * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presource = int(resource) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24608 * presource = int(resource) * else: * presource = int(cudaGraphicsResource_t(resource)) # <<<<<<<<<<<<<< * cyresource = presource * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_resource}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24608, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject
*)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_presource = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24609 * else: * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphicsResourceSetMapFlags(cyresource, flags) */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_presource); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24609, __pyx_L1_error) __pyx_v_cyresource = ((cudaGraphicsResource_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":24610 * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsResourceSetMapFlags(cyresource, flags) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24611 * cyresource = presource * with nogil: * err = cyruntime.cudaGraphicsResourceSetMapFlags(cyresource, flags) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphicsResourceSetMapFlags(__pyx_v_cyresource, __pyx_v_flags); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24611, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":24610 * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsResourceSetMapFlags(cyresource, flags) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":24612 * with nogil: * err = cyruntime.cudaGraphicsResourceSetMapFlags(cyresource,
flags) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24612, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24561 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsResourceSetMapFlags(resource, unsigned int flags): * """ Set usage flags for mapping a graphics resource. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsResourceSetMapFlags", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_presource); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24614 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsMapResources(int count, resources, stream): * """ Map graphics resources for access by CUDA.
*/ /* NOTE(review): Cython-generated. Python wrapper for cudaGraphicsMapResources(count, resources, stream): fastcall/keyword argument parsing; count is converted to a C int, resources and stream are passed through as objects to the implementation below. Regenerate from runtime.pyx rather than editing by hand. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_337cudaGraphicsMapResources(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_336cudaGraphicsMapResources, "cudaGraphicsMapResources(int count, resources, stream)\n\nMap graphics resources for access by CUDA.\n\nMaps the `count` graphics resources in `resources` for access by CUDA.\n\nThe resources in `resources` may be accessed by CUDA until they are\nunmapped. The graphics API from which `resources` were registered\nshould not access any resources while they are mapped by CUDA. If an\napplication does so, the results are undefined.\n\nThis function provides the synchronization guarantee that any graphics\ncalls issued before :py:obj:`~.cudaGraphicsMapResources()` will\ncomplete before any subsequent CUDA work issued in `stream` begins.\n\nIf `resources` contains any duplicate entries then\n:py:obj:`~.cudaErrorInvalidResourceHandle` is returned. 
If any of\n`resources` are presently mapped for access by CUDA then\n:py:obj:`~.cudaErrorUnknown` is returned.\n\nParameters\n----------\ncount : int\n Number of resources to map\nresources : :py:obj:`~.cudaGraphicsResource_t`\n Resources to map for CUDA\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream for synchronization\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorUnknown`\n\nSee Also\n--------\n:py:obj:`~.cudaGraphicsResourceGetMappedPointer`, :py:obj:`~.cudaGraphicsSubResourceGetMappedArray`, :py:obj:`~.cudaGraphicsUnmapResources`, :py:obj:`~.cuGraphicsMapResources`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_337cudaGraphicsMapResources = {"cudaGraphicsMapResources", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_337cudaGraphicsMapResources, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_336cudaGraphicsMapResources}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_337cudaGraphicsMapResources(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_count; PyObject *__pyx_v_resources = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphicsMapResources (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** 
const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_resources,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24614, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24614, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24614, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24614, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphicsMapResources", 0) < (0)) __PYX_ERR(0, 24614, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphicsMapResources", 1, 3, 3, i); __PYX_ERR(0, 24614, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24614, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24614, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24614, __pyx_L3_error) } __pyx_v_count = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_count == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24615, __pyx_L3_error) 
__pyx_v_resources = values[1]; __pyx_v_stream = values[2]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphicsMapResources", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24614, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsMapResources", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_336cudaGraphicsMapResources(__pyx_self, __pyx_v_count, __pyx_v_resources, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Impl: stream is coerced (None -> 0, cudaStream_t or driver.CUstream -> int(stream), else cudaStream_t(stream)); resources must be None, a cudaGraphicsResource_t (uses its getPtr()) or an int, otherwise TypeError is raised; the cyruntime call runs with the GIL released and the result is returned as (cudaError_t,). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_336cudaGraphicsMapResources(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_count, PyObject *__pyx_v_resources, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaGraphicsResource_t *__pyx_v_cyresources; PyObject *__pyx_v_presources = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; cudaError_t __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphicsMapResources", 0); /* "cuda/bindings/runtime.pyx":24653 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ 
__pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24654 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24653 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24655 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24656 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24656, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":24655 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24658 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaGraphicsResource_t *cyresources */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24658, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24659 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphicsResource_t *cyresources * if resources is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24659, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24661 * cystream = pstream * cdef cyruntime.cudaGraphicsResource_t *cyresources * if resources is None: # <<<<<<<<<<<<<< * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): */ __pyx_t_1 = (__pyx_v_resources == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24662 * cdef cyruntime.cudaGraphicsResource_t *cyresources * if resources is None: * cyresources = NULL # <<<<<<<<<<<<<< * elif isinstance(resources, (cudaGraphicsResource_t,)): * presources = resources.getPtr() */ __pyx_v_cyresources = ((cudaGraphicsResource_t *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)NULL)); /* "cuda/bindings/runtime.pyx":24661 * cystream = 
pstream * cdef cyruntime.cudaGraphicsResource_t *cyresources * if resources is None: # <<<<<<<<<<<<<< * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":24663 * if resources is None: * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presources = resources.getPtr() * cyresources = presources */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_resources, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24664 * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): * presources = resources.getPtr() # <<<<<<<<<<<<<< * cyresources = presources * elif isinstance(resources, (int)): */ __pyx_t_3 = __pyx_v_resources; __Pyx_INCREF(__pyx_t_3); __pyx_t_6 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24664, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __pyx_v_presources = __pyx_t_5; __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":24665 * elif isinstance(resources, (cudaGraphicsResource_t,)): * presources = resources.getPtr() * cyresources = presources # <<<<<<<<<<<<<< * elif isinstance(resources, (int)): * cyresources = resources */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_presources); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24665, __pyx_L1_error) __pyx_v_cyresources = ((cudaGraphicsResource_t *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24663 * if resources is None: * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presources = resources.getPtr() * cyresources = 
presources */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":24666 * presources = resources.getPtr() * cyresources = presources * elif isinstance(resources, (int)): # <<<<<<<<<<<<<< * cyresources = resources * else: */ __pyx_t_1 = PyLong_Check(__pyx_v_resources); if (likely(__pyx_t_1)) { /* "cuda/bindings/runtime.pyx":24667 * cyresources = presources * elif isinstance(resources, (int)): * cyresources = resources # <<<<<<<<<<<<<< * else: * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_resources); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24667, __pyx_L1_error) __pyx_v_cyresources = ((cudaGraphicsResource_t *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24666 * presources = resources.getPtr() * cyresources = presources * elif isinstance(resources, (int)): # <<<<<<<<<<<<<< * cyresources = resources * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":24669 * cyresources = resources * else: * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphicsMapResources(count, cyresources, cystream) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_4 = __pyx_builtin_TypeError; __pyx_t_8 = __Pyx_PyObject_Unicode(((PyObject *)Py_TYPE(__pyx_v_resources))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 24669, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Argument_resources_is_not_instan, __pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 24669, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_9}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, 
(2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24669, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 24669, __pyx_L1_error) } __pyx_L6:; /* "cuda/bindings/runtime.pyx":24670 * else: * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsMapResources(count, cyresources, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24671 * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) * with nogil: * err = cyruntime.cudaGraphicsMapResources(count, cyresources, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_10 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphicsMapResources(__pyx_v_count, __pyx_v_cyresources, __pyx_v_cystream); if (unlikely(__pyx_t_10 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24671, __pyx_L8_error) __pyx_v_err = __pyx_t_10; } /* "cuda/bindings/runtime.pyx":24670 * else: * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsMapResources(count, cyresources, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L9; } __pyx_L8_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L9:; } } /* "cuda/bindings/runtime.pyx":24672 * with nogil: * err = cyruntime.cudaGraphicsMapResources(count, cyresources, cystream) * return 
(_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 24672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 24672, __pyx_L1_error); __pyx_t_9 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24614 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsMapResources(int count, resources, stream): * """ Map graphics resources for access by CUDA. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsMapResources", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_presources); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24674 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsUnmapResources(int count, resources, stream): * """ Unmap graphics resources. 
*/ /* NOTE(review): Cython-generated. Python wrapper plus implementation (body continues past this chunk) for cudaGraphicsUnmapResources(count, resources, stream); argument handling mirrors cudaGraphicsMapResources. Regenerate from runtime.pyx rather than editing by hand. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_339cudaGraphicsUnmapResources(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_338cudaGraphicsUnmapResources, "cudaGraphicsUnmapResources(int count, resources, stream)\n\nUnmap graphics resources.\n\nUnmaps the `count` graphics resources in `resources`.\n\nOnce unmapped, the resources in `resources` may not be accessed by CUDA\nuntil they are mapped again.\n\nThis function provides the synchronization guarantee that any CUDA work\nissued in `stream` before :py:obj:`~.cudaGraphicsUnmapResources()` will\ncomplete before any subsequently issued graphics work begins.\n\nIf `resources` contains any duplicate entries then\n:py:obj:`~.cudaErrorInvalidResourceHandle` is returned. If any of\n`resources` are not presently mapped for access by CUDA then\n:py:obj:`~.cudaErrorUnknown` is returned.\n\nParameters\n----------\ncount : int\n Number of resources to unmap\nresources : :py:obj:`~.cudaGraphicsResource_t`\n Resources to unmap\nstream : :py:obj:`~.CUstream` or :py:obj:`~.cudaStream_t`\n Stream for synchronization\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorUnknown`\n\nSee Also\n--------\n:py:obj:`~.cudaGraphicsMapResources`, :py:obj:`~.cuGraphicsUnmapResources`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_339cudaGraphicsUnmapResources = {"cudaGraphicsUnmapResources", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_339cudaGraphicsUnmapResources, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_338cudaGraphicsUnmapResources}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_339cudaGraphicsUnmapResources(PyObject *__pyx_self, #if 
CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_count; PyObject *__pyx_v_resources = 0; PyObject *__pyx_v_stream = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphicsUnmapResources (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_resources,&__pyx_mstate_global->__pyx_n_u_stream,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24674, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24674, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24674, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24674, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphicsUnmapResources", 0) < (0)) __PYX_ERR(0, 24674, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphicsUnmapResources", 1, 3, 3, i); __PYX_ERR(0, 24674, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24674, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24674, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24674, __pyx_L3_error) } __pyx_v_count = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_count == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24675, __pyx_L3_error) __pyx_v_resources = values[1]; __pyx_v_stream = values[2]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphicsUnmapResources", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 
24674, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsUnmapResources", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_338cudaGraphicsUnmapResources(__pyx_self, __pyx_v_count, __pyx_v_resources, __pyx_v_stream); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_338cudaGraphicsUnmapResources(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_count, PyObject *__pyx_v_resources, PyObject *__pyx_v_stream) { cudaStream_t __pyx_v_cystream; PyObject *__pyx_v_pstream = NULL; cudaGraphicsResource_t *__pyx_v_cyresources; PyObject *__pyx_v_presources = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; cudaError_t __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphicsUnmapResources", 0); /* "cuda/bindings/runtime.pyx":24711 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ __pyx_t_1 = (__pyx_v_stream == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24712 * cdef cyruntime.cudaStream_t cystream * if stream is None: * pstream = 0 # <<<<<<<<<<<<<< * elif 
isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pstream = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24711 * """ * cdef cyruntime.cudaStream_t cystream * if stream is None: # <<<<<<<<<<<<<< * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24713 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_stream, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUstream); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24714 * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): * pstream = int(stream) # <<<<<<<<<<<<<< * else: * pstream = int(cudaStream_t(stream)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_stream); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24714, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pstream = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":24713 * if stream is None: * pstream = 0 * elif isinstance(stream, (cudaStream_t,driver.CUstream)): # <<<<<<<<<<<<<< * pstream = int(stream) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24716 * pstream = int(stream) * else: * pstream = int(cudaStream_t(stream)) # <<<<<<<<<<<<<< * cystream = pstream * cdef cyruntime.cudaGraphicsResource_t *cyresources */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaStream_t); __pyx_t_6 = 1; { PyObject 
*__pyx_callargs[2] = {__pyx_t_4, __pyx_v_stream}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24716, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24716, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pstream = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24717 * else: * pstream = int(cudaStream_t(stream)) * cystream = pstream # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphicsResource_t *cyresources * if resources is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pstream); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24717, __pyx_L1_error) __pyx_v_cystream = ((cudaStream_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24719 * cystream = pstream * cdef cyruntime.cudaGraphicsResource_t *cyresources * if resources is None: # <<<<<<<<<<<<<< * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): */ __pyx_t_1 = (__pyx_v_resources == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24720 * cdef cyruntime.cudaGraphicsResource_t *cyresources * if resources is None: * cyresources = NULL # <<<<<<<<<<<<<< * elif isinstance(resources, (cudaGraphicsResource_t,)): * presources = resources.getPtr() */ __pyx_v_cyresources = ((cudaGraphicsResource_t *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)NULL)); /* "cuda/bindings/runtime.pyx":24719 * cystream = pstream * cdef cyruntime.cudaGraphicsResource_t *cyresources * if resources is None: # <<<<<<<<<<<<<< * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): */ goto __pyx_L6; 
} /* "cuda/bindings/runtime.pyx":24721 * if resources is None: * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presources = resources.getPtr() * cyresources = presources */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_resources, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24722 * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): * presources = resources.getPtr() # <<<<<<<<<<<<<< * cyresources = presources * elif isinstance(resources, (int)): */ __pyx_t_3 = __pyx_v_resources; __Pyx_INCREF(__pyx_t_3); __pyx_t_6 = 0; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCallMethod(__pyx_mstate_global->__pyx_n_u_getPtr, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (1*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24722, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __pyx_v_presources = __pyx_t_5; __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":24723 * elif isinstance(resources, (cudaGraphicsResource_t,)): * presources = resources.getPtr() * cyresources = presources # <<<<<<<<<<<<<< * elif isinstance(resources, (int)): * cyresources = resources */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_presources); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24723, __pyx_L1_error) __pyx_v_cyresources = ((cudaGraphicsResource_t *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24721 * if resources is None: * cyresources = NULL * elif isinstance(resources, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presources = resources.getPtr() * cyresources = presources */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":24724 * presources = resources.getPtr() * cyresources = presources * elif isinstance(resources, (int)): # <<<<<<<<<<<<<< * cyresources = 
resources * else: */ __pyx_t_1 = PyLong_Check(__pyx_v_resources); if (likely(__pyx_t_1)) { /* "cuda/bindings/runtime.pyx":24725 * cyresources = presources * elif isinstance(resources, (int)): * cyresources = resources # <<<<<<<<<<<<<< * else: * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_resources); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24725, __pyx_L1_error) __pyx_v_cyresources = ((cudaGraphicsResource_t *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":24724 * presources = resources.getPtr() * cyresources = presources * elif isinstance(resources, (int)): # <<<<<<<<<<<<<< * cyresources = resources * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":24727 * cyresources = resources * else: * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphicsUnmapResources(count, cyresources, cystream) */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_4 = __pyx_builtin_TypeError; __pyx_t_8 = __Pyx_PyObject_Unicode(((PyObject *)Py_TYPE(__pyx_v_resources))); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 24727, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Argument_resources_is_not_instan, __pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 24727, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_9}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 24727, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 24727, __pyx_L1_error) } __pyx_L6:; /* "cuda/bindings/runtime.pyx":24728 * else: * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsUnmapResources(count, cyresources, cystream) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24729 * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) * with nogil: * err = cyruntime.cudaGraphicsUnmapResources(count, cyresources, cystream) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_10 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphicsUnmapResources(__pyx_v_count, __pyx_v_cyresources, __pyx_v_cystream); if (unlikely(__pyx_t_10 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24729, __pyx_L8_error) __pyx_v_err = __pyx_t_10; } /* "cuda/bindings/runtime.pyx":24728 * else: * raise TypeError("Argument 'resources' is not instance of type (expected , found " + str(type(resources))) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsUnmapResources(count, cyresources, cystream) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L9; } __pyx_L8_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L9:; } } /* "cuda/bindings/runtime.pyx":24730 * with nogil: * err = cyruntime.cudaGraphicsUnmapResources(count, cyresources, cystream) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, 
__pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24730, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24730, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 24730, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24730, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 24730, __pyx_L1_error); __pyx_t_9 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24674 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsUnmapResources(int count, resources, stream): * """ Unmap graphics resources. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsUnmapResources", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pstream); __Pyx_XDECREF(__pyx_v_presources); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24732 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsResourceGetMappedPointer(resource): * """ Get an device pointer through which to access a mapped graphics resource. 
*/
/* ======================================================================
 * cudaGraphicsResourceGetMappedPointer
 * (from "cuda/bindings/runtime.pyx":24732)
 *
 * NOTE(review): this translation unit is machine-generated by Cython from
 * runtime.pyx. Do not hand-edit the logic here; change the .pyx source and
 * regenerate. Comments below only describe what the generated code does.
 * ====================================================================== */
/* Python wrapper: forward declaration. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_341cudaGraphicsResourceGetMappedPointer(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Embedded Python docstring for the binding (embedsignature=True form). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_340cudaGraphicsResourceGetMappedPointer, "cudaGraphicsResourceGetMappedPointer(resource)\n\nGet an device pointer through which to access a mapped graphics resource.\n\nReturns in `*devPtr` a pointer through which the mapped graphics\nresource `resource` may be accessed. Returns in `*size` the size of the\nmemory in bytes which may be accessed from that pointer. The value set\nin `devPtr` may change every time that `resource` is mapped.\n\nIf `resource` is not a buffer then it cannot be accessed via a pointer\nand :py:obj:`~.cudaErrorUnknown` is returned. If `resource` is not\nmapped then :py:obj:`~.cudaErrorUnknown` is returned.\n\nParameters\n----------\nresource : :py:obj:`~.cudaGraphicsResource_t`\n None\n\nReturns\n-------\ncudaError_t\n\ndevPtr : Any\n None\nsize : int\n None");
/* Method-table entry: FASTCALL+keywords dispatch to the wrapper below. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_341cudaGraphicsResourceGetMappedPointer = {"cudaGraphicsResourceGetMappedPointer", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_341cudaGraphicsResourceGetMappedPointer, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_340cudaGraphicsResourceGetMappedPointer};
/* Python wrapper: parses exactly one argument ('resource', positional or
 * keyword), then delegates to the __pyx_pf_ implementation further down.
 * On any parse error it DECREFs collected argument refs, records a
 * traceback, and returns NULL. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_341cudaGraphicsResourceGetMappedPointer(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  PyObject *__pyx_v_resource = 0;
#if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject* values[1] = {0};  /* slot for the single parsed argument */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("cudaGraphicsResourceGetMappedPointer (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_resource,0};
    const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
    if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24732, __pyx_L3_error)
    if (__pyx_kwds_len > 0) {
      /* Mixed positional/keyword path. */
      switch (__pyx_nargs) {
        case 1:
          values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
          if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24732, __pyx_L3_error)
          CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      const Py_ssize_t kwd_pos_args = __pyx_nargs;
      if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphicsResourceGetMappedPointer", 0) < (0)) __PYX_ERR(0, 24732, __pyx_L3_error)
      /* Verify every required slot was filled. */
      for (Py_ssize_t i = __pyx_nargs; i < 1; i++) {
        if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphicsResourceGetMappedPointer", 1, 1, 1, i); __PYX_ERR(0, 24732, __pyx_L3_error) }
      }
    } else if (unlikely(__pyx_nargs != 1)) {
      goto __pyx_L5_argtuple_error;
    } else {
      /* Fast path: exactly one positional argument, no keywords. */
      values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
      if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24732, __pyx_L3_error)
    }
    __pyx_v_resource = values[0];
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("cudaGraphicsResourceGetMappedPointer", 1, 1, 1, __pyx_nargs);
  __PYX_ERR(0, 24732, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  /* Error path: release any argument references acquired above. */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsResourceGetMappedPointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_340cudaGraphicsResourceGetMappedPointer(__pyx_self, __pyx_v_resource);
  /* function exit code */
  for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
    Py_XDECREF(values[__pyx_temp]);
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation: coerces 'resource' to an integer handle, calls the
 * cyruntime cudaGraphicsResourceGetMappedPointer with the GIL released,
 * and returns either (err, None, None) on failure or (err, devPtr, size)
 * on success, with err looked up in the module-level _dict_cudaError_t. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_340cudaGraphicsResourceGetMappedPointer(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_resource) {
  cudaGraphicsResource_t __pyx_v_cyresource;
  PyObject *__pyx_v_presource = NULL;
  __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_v_devPtr;
  size_t __pyx_v_size;
  cudaError_t __pyx_v_err;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  size_t __pyx_t_5;
  __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6;
  cudaError_t __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("cudaGraphicsResourceGetMappedPointer", 0);
  /* ":24760-24761  if resource is None: presource = 0 */
  __pyx_t_1 = (__pyx_v_resource == Py_None);
  if (__pyx_t_1) {
    __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0);
    __pyx_v_presource = __pyx_mstate_global->__pyx_int_0;
    goto __pyx_L3;
  }
  /* ":24762-24763  elif isinstance(resource, cudaGraphicsResource_t): presource = int(resource) */
  __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_resource, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t);
  if (__pyx_t_1) {
    __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_resource); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24763, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_v_presource = ((PyObject*)__pyx_t_2);
    __pyx_t_2 = 0;
    goto __pyx_L3;
  }
  /* ":24765  else: presource = int(cudaGraphicsResource_t(resource)) */
  /*else*/ {
    __pyx_t_3 = NULL;
    __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t);
    __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t);
    __pyx_t_5 = 1;
    {
      PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_resource};
      __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
      __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24765, __pyx_L1_error)
      __Pyx_GOTREF((PyObject *)__pyx_t_2);
    }
    __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24765, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0;
    __pyx_v_presource = ((PyObject*)__pyx_t_4);
    __pyx_t_4 = 0;
  }
  __pyx_L3:;
  /* ":24766  cyresource = presource  (Python int -> native handle) */
  __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_presource); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24766, __pyx_L1_error)
  __pyx_v_cyresource = ((cudaGraphicsResource_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6));
  /* ":24767-24768  zero-initialize the two out-parameters */
  __pyx_v_devPtr = 0;
  __pyx_v_size = 0;
  /* ":24769-24770  with nogil: call the CUDA runtime with the GIL released */
  {
    PyThreadState *_save;
    _save = NULL;
    Py_UNBLOCK_THREADS
    __Pyx_FastGIL_Remember();
    /*try:*/ {
      __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphicsResourceGetMappedPointer(((void **)(&__pyx_v_devPtr)), (&__pyx_v_size), __pyx_v_cyresource);
      if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24770, __pyx_L5_error)
      __pyx_v_err = __pyx_t_7;
    }
    /*finally:*/ {
      /*normal exit:*/ { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; }
      __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; }
      __pyx_L6:;
    }
  }
  /* ":24771-24772  on failure return (err, None, None) */
  __pyx_t_1 = (__pyx_v_err != cudaSuccess);
  if (__pyx_t_1) {
    __Pyx_XDECREF(__pyx_r);
    __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24772, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24772, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24772, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24772, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_GIVEREF(__pyx_t_3);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 24772, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, Py_None) != (0)) __PYX_ERR(0, 24772, __pyx_L1_error);
    __Pyx_INCREF(Py_None);
    __Pyx_GIVEREF(Py_None);
    if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 2, Py_None) != (0)) __PYX_ERR(0, 24772, __pyx_L1_error);
    __pyx_t_3 = 0;
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
  }
  /* ":24773  on success return (err, devPtr, size) */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyLong_From_unsigned_PY_LONG_LONG(__pyx_v_devPtr); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_2 = __Pyx_PyLong_FromSize_t(__pyx_v_size); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 24773, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_8);
  __Pyx_GIVEREF(__pyx_t_4);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 24773, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_3);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_3) != (0)) __PYX_ERR(0, 24773, __pyx_L1_error);
  __Pyx_GIVEREF(__pyx_t_2);
  if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_2) != (0)) __PYX_ERR(0, 24773, __pyx_L1_error);
  __pyx_t_4 = 0;
  __pyx_t_3 = 0;
  __pyx_t_2 = 0;
  __pyx_r = __pyx_t_8;
  __pyx_t_8 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsResourceGetMappedPointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_presource);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":24775
 * return (_dict_cudaError_t[err], devPtr, size)
 *
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaGraphicsSubResourceGetMappedArray(resource, unsigned int arrayIndex, unsigned int mipLevel):
 *     """ Get an array through which to access a subresource of a mapped graphics resource.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_343cudaGraphicsSubResourceGetMappedArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_342cudaGraphicsSubResourceGetMappedArray, "cudaGraphicsSubResourceGetMappedArray(resource, unsigned int arrayIndex, unsigned int mipLevel)\n\nGet an array through which to access a subresource of a mapped graphics resource.\n\nReturns in `*array` an array through which the subresource of the\nmapped graphics resource `resource` which corresponds to array index\n`arrayIndex` and mipmap level `mipLevel` may be accessed. The value set\nin `array` may change every time that `resource` is mapped.\n\nIf `resource` is not a texture then it cannot be accessed via an array\nand :py:obj:`~.cudaErrorUnknown` is returned. If `arrayIndex` is not a\nvalid array index for `resource` then :py:obj:`~.cudaErrorInvalidValue`\nis returned. If `mipLevel` is not a valid mipmap level for `resource`\nthen :py:obj:`~.cudaErrorInvalidValue` is returned. 
If `resource` is\nnot mapped then :py:obj:`~.cudaErrorUnknown` is returned.\n\nParameters\n----------\nresource : :py:obj:`~.cudaGraphicsResource_t`\n Mapped resource to access\narrayIndex : unsigned int\n Array index for array textures or cubemap face index as defined by\n :py:obj:`~.cudaGraphicsCubeFace` for cubemap textures for the\n subresource to access\nmipLevel : unsigned int\n Mipmap level for the subresource to access\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorUnknown`\narray : :py:obj:`~.cudaArray_t`\n Returned array through which a subresource of `resource` may be\n accessed\n\nSee Also\n--------\n:py:obj:`~.cudaGraphicsResourceGetMappedPointer`, :py:obj:`~.cuGraphicsSubResourceGetMappedArray`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_343cudaGraphicsSubResourceGetMappedArray = {"cudaGraphicsSubResourceGetMappedArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_343cudaGraphicsSubResourceGetMappedArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_342cudaGraphicsSubResourceGetMappedArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_343cudaGraphicsSubResourceGetMappedArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_resource = 0; unsigned int __pyx_v_arrayIndex; unsigned int __pyx_v_mipLevel; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphicsSubResourceGetMappedArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if 
CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_resource,&__pyx_mstate_global->__pyx_n_u_arrayIndex,&__pyx_mstate_global->__pyx_n_u_mipLevel,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24775, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24775, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24775, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24775, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphicsSubResourceGetMappedArray", 0) < (0)) __PYX_ERR(0, 24775, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphicsSubResourceGetMappedArray", 1, 3, 3, i); __PYX_ERR(0, 24775, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24775, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24775, __pyx_L3_error) values[2] 
= __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24775, __pyx_L3_error) } __pyx_v_resource = values[0]; __pyx_v_arrayIndex = __Pyx_PyLong_As_unsigned_int(values[1]); if (unlikely((__pyx_v_arrayIndex == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24776, __pyx_L3_error) __pyx_v_mipLevel = __Pyx_PyLong_As_unsigned_int(values[2]); if (unlikely((__pyx_v_mipLevel == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24776, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphicsSubResourceGetMappedArray", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24775, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsSubResourceGetMappedArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_342cudaGraphicsSubResourceGetMappedArray(__pyx_self, __pyx_v_resource, __pyx_v_arrayIndex, __pyx_v_mipLevel); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_342cudaGraphicsSubResourceGetMappedArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_resource, unsigned int __pyx_v_arrayIndex, unsigned int __pyx_v_mipLevel) { cudaGraphicsResource_t __pyx_v_cyresource; PyObject *__pyx_v_presource = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *__pyx_v_array = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; 
PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphicsSubResourceGetMappedArray", 0); /* "cuda/bindings/runtime.pyx":24815 * """ * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: # <<<<<<<<<<<<<< * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): */ __pyx_t_1 = (__pyx_v_resource == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24816 * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: * presource = 0 # <<<<<<<<<<<<<< * elif isinstance(resource, (cudaGraphicsResource_t,)): * presource = int(resource) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_presource = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24815 * """ * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: # <<<<<<<<<<<<<< * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24817 * if resource is None: * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presource = int(resource) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_resource, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24818 * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): * presource = int(resource) # <<<<<<<<<<<<<< * else: * presource = int(cudaGraphicsResource_t(resource)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_resource); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24818, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_presource = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":24817 * if resource is None: * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * 
presource = int(resource) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24820 * presource = int(resource) * else: * presource = int(cudaGraphicsResource_t(resource)) # <<<<<<<<<<<<<< * cyresource = presource * cdef cudaArray_t array = cudaArray_t() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_resource}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24820, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24820, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_presource = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24821 * else: * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource # <<<<<<<<<<<<<< * cdef cudaArray_t array = cudaArray_t() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_presource); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24821, __pyx_L1_error) __pyx_v_cyresource = ((cudaGraphicsResource_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":24822 * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource * cdef cudaArray_t array = cudaArray_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphicsSubResourceGetMappedArray(array._pvt_ptr, cyresource, arrayIndex, mipLevel) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24822, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_array = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaArray_t *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":24823 * cyresource = presource * cdef cudaArray_t array = cudaArray_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsSubResourceGetMappedArray(array._pvt_ptr, cyresource, arrayIndex, mipLevel) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24824 * cdef cudaArray_t array = cudaArray_t() * with nogil: * err = cyruntime.cudaGraphicsSubResourceGetMappedArray(array._pvt_ptr, cyresource, arrayIndex, mipLevel) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphicsSubResourceGetMappedArray(((cudaArray_t *)__pyx_v_array->_pvt_ptr), __pyx_v_cyresource, __pyx_v_arrayIndex, __pyx_v_mipLevel); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24824, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":24823 * cyresource = presource * cdef cudaArray_t array = cudaArray_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsSubResourceGetMappedArray(array._pvt_ptr, cyresource, arrayIndex, mipLevel) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ 
__Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":24825 * with nogil: * err = cyruntime.cudaGraphicsSubResourceGetMappedArray(array._pvt_ptr, cyresource, arrayIndex, mipLevel) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], array) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24826 * err = cyruntime.cudaGraphicsSubResourceGetMappedArray(array._pvt_ptr, cyresource, arrayIndex, mipLevel) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], array) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 24826, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 24826, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24825 * with nogil: * err = cyruntime.cudaGraphicsSubResourceGetMappedArray(array._pvt_ptr, cyresource, arrayIndex, mipLevel) * if err != cyruntime.cudaSuccess: # 
<<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], array) */ } /* "cuda/bindings/runtime.pyx":24827 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], array) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 24827, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_array); __Pyx_GIVEREF((PyObject *)__pyx_v_array); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_array)) != (0)) __PYX_ERR(0, 24827, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24775 * return (_dict_cudaError_t[err], devPtr, size) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsSubResourceGetMappedArray(resource, unsigned int arrayIndex, unsigned int mipLevel): * """ Get an array through which to access a subresource of a mapped graphics resource. 
NOTE(review): machine-generated Cython output (source: cuda/bindings/runtime.pyx). Do not hand-edit; regenerate from the .pyx. The lines below are the argument-parsing wrapper for cudaGraphicsResourceGetMappedMipmappedArray(resource): docstring, PyMethodDef entry, then the METH_FASTCALL/tuple-args wrapper that unpacks exactly one positional-or-keyword argument named `resource` and forwards to the pf_ implementation.
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsSubResourceGetMappedArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_presource); __Pyx_XDECREF((PyObject *)__pyx_v_array); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24829 * return (_dict_cudaError_t[err], array) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsResourceGetMappedMipmappedArray(resource): * """ Get a mipmapped array through which to access a mapped graphics resource. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_345cudaGraphicsResourceGetMappedMipmappedArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_344cudaGraphicsResourceGetMappedMipmappedArray, "cudaGraphicsResourceGetMappedMipmappedArray(resource)\n\nGet a mipmapped array through which to access a mapped graphics resource.\n\nReturns in `*mipmappedArray` a mipmapped array through which the mapped\ngraphics resource `resource` may be accessed. The value set in\n`mipmappedArray` may change every time that `resource` is mapped.\n\nIf `resource` is not a texture then it cannot be accessed via an array\nand :py:obj:`~.cudaErrorUnknown` is returned. 
If `resource` is not\nmapped then :py:obj:`~.cudaErrorUnknown` is returned.\n\nParameters\n----------\nresource : :py:obj:`~.cudaGraphicsResource_t`\n Mapped resource to access\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorUnknown`\nmipmappedArray : :py:obj:`~.cudaMipmappedArray_t`\n Returned mipmapped array through which `resource` may be accessed\n\nSee Also\n--------\n:py:obj:`~.cudaGraphicsResourceGetMappedPointer`, :py:obj:`~.cuGraphicsResourceGetMappedMipmappedArray`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_345cudaGraphicsResourceGetMappedMipmappedArray = {"cudaGraphicsResourceGetMappedMipmappedArray", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_345cudaGraphicsResourceGetMappedMipmappedArray, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_344cudaGraphicsResourceGetMappedMipmappedArray}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_345cudaGraphicsResourceGetMappedMipmappedArray(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_resource = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphicsResourceGetMappedMipmappedArray (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = 
/* keyword-argument name table consumed by __Pyx_ParseKeywords below */
{&__pyx_mstate_global->__pyx_n_u_resource,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24829, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24829, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphicsResourceGetMappedMipmappedArray", 0) < (0)) __PYX_ERR(0, 24829, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphicsResourceGetMappedMipmappedArray", 1, 1, 1, i); __PYX_ERR(0, 24829, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24829, __pyx_L3_error) } __pyx_v_resource = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphicsResourceGetMappedMipmappedArray", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24829, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsResourceGetMappedMipmappedArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_344cudaGraphicsResourceGetMappedMipmappedArray(__pyx_self, __pyx_v_resource); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
/* NOTE(review): Cython-generated implementation of cudaGraphicsResourceGetMappedMipmappedArray (runtime.pyx:24858-24870). Do not hand-edit; regenerate from the .pyx. Flow: coerce `resource` (None -> 0, cudaGraphicsResource_t -> int(resource), else int(cudaGraphicsResource_t(resource))) into `cyresource`, allocate a cudaMipmappedArray_t wrapper, call the cyruntime binding with the GIL released, then return a 2-tuple (_dict_cudaError_t[err], mipmappedArray-or-None). */
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_344cudaGraphicsResourceGetMappedMipmappedArray(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_resource) { cudaGraphicsResource_t __pyx_v_cyresource; PyObject *__pyx_v_presource = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMipmappedArray_t *__pyx_v_mipmappedArray = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphicsResourceGetMappedMipmappedArray", 0); /* "cuda/bindings/runtime.pyx":24858 * """ * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: # <<<<<<<<<<<<<< * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): */ __pyx_t_1 = (__pyx_v_resource == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24859 * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: * presource = 0 # <<<<<<<<<<<<<< * elif isinstance(resource, (cudaGraphicsResource_t,)): * presource = int(resource) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_presource = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24858 * """ * cdef cyruntime.cudaGraphicsResource_t cyresource * if resource is None: # <<<<<<<<<<<<<< * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24860 * if resource is None: * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presource = int(resource) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_resource, 
__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24861 * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): * presource = int(resource) # <<<<<<<<<<<<<< * else: * presource = int(cudaGraphicsResource_t(resource)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_resource); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24861, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_presource = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":24860 * if resource is None: * presource = 0 * elif isinstance(resource, (cudaGraphicsResource_t,)): # <<<<<<<<<<<<<< * presource = int(resource) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24863 * presource = int(resource) * else: * presource = int(cudaGraphicsResource_t(resource)) # <<<<<<<<<<<<<< * cyresource = presource * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphicsResource_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_resource}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24863, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24863, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_presource = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24864 * else: * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource # 
<<<<<<<<<<<<<< * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_presource); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24864, __pyx_L1_error) __pyx_v_cyresource = ((cudaGraphicsResource_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":24865 * presource = int(cudaGraphicsResource_t(resource)) * cyresource = presource * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphicsResourceGetMappedMipmappedArray(mipmappedArray._pvt_ptr, cyresource) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMipmappedArray_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24865, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_mipmappedArray = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMipmappedArray_t *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":24866 * cyresource = presource * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsResourceGetMappedMipmappedArray(mipmappedArray._pvt_ptr, cyresource) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24867 * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() * with nogil: * err = 
cyruntime.cudaGraphicsResourceGetMappedMipmappedArray(mipmappedArray._pvt_ptr, cyresource) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphicsResourceGetMappedMipmappedArray(((cudaMipmappedArray_t *)__pyx_v_mipmappedArray->_pvt_ptr), __pyx_v_cyresource); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24867, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":24866 * cyresource = presource * cdef cudaMipmappedArray_t mipmappedArray = cudaMipmappedArray_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphicsResourceGetMappedMipmappedArray(mipmappedArray._pvt_ptr, cyresource) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":24868 * with nogil: * err = cyruntime.cudaGraphicsResourceGetMappedMipmappedArray(mipmappedArray._pvt_ptr, cyresource) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], mipmappedArray) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24869 * err = cyruntime.cudaGraphicsResourceGetMappedMipmappedArray(mipmappedArray._pvt_ptr, cyresource) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], mipmappedArray) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24869, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24869, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
/* error path (pyx 24869): finish building the 2-tuple (_dict_cudaError_t[err], None) and return it */
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24869, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24869, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 24869, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 24869, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24868 * with nogil: * err = cyruntime.cudaGraphicsResourceGetMappedMipmappedArray(mipmappedArray._pvt_ptr, cyresource) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], mipmappedArray) */ } /* "cuda/bindings/runtime.pyx":24870 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], mipmappedArray) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 24870, __pyx_L1_error); 
/* success path (pyx 24870): second tuple element is the mipmappedArray wrapper object */
__Pyx_INCREF((PyObject *)__pyx_v_mipmappedArray); __Pyx_GIVEREF((PyObject *)__pyx_v_mipmappedArray); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_mipmappedArray)) != (0)) __PYX_ERR(0, 24870, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24829 * return (_dict_cudaError_t[err], array) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphicsResourceGetMappedMipmappedArray(resource): * """ Get a mipmapped array through which to access a mapped graphics resource. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphicsResourceGetMappedMipmappedArray", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_presource); __Pyx_XDECREF((PyObject *)__pyx_v_mipmappedArray); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24872 * return (_dict_cudaError_t[err], mipmappedArray) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetChannelDesc(array): * """ Get the channel descriptor of an array. 
NOTE(review): machine-generated Cython output; regenerate from cuda/bindings/runtime.pyx rather than editing. Below: docstring, PyMethodDef entry, and the argument-parsing wrapper for cudaGetChannelDesc(array) (unpacks the single `array` argument and forwards to the pf_ implementation), followed by the start of that implementation.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_347cudaGetChannelDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_346cudaGetChannelDesc, "cudaGetChannelDesc(array)\n\nGet the channel descriptor of an array.\n\nReturns in `*desc` the channel descriptor of the CUDA array `array`.\n\nParameters\n----------\narray : :py:obj:`~.cudaArray_const_t`\n Memory array on device\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\ndesc : :py:obj:`~.cudaChannelFormatDesc`\n Channel format\n\nSee Also\n--------\n:py:obj:`~.cudaCreateChannelDesc (C API)`, :py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cudaCreateSurfaceObject`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_347cudaGetChannelDesc = {"cudaGetChannelDesc", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_347cudaGetChannelDesc, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_346cudaGetChannelDesc}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_347cudaGetChannelDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_array = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetChannelDesc (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 
0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_array_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24872, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24872, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGetChannelDesc", 0) < (0)) __PYX_ERR(0, 24872, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGetChannelDesc", 1, 1, 1, i); __PYX_ERR(0, 24872, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24872, __pyx_L3_error) } __pyx_v_array = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGetChannelDesc", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 24872, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetChannelDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_346cudaGetChannelDesc(__pyx_self, __pyx_v_array); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_346cudaGetChannelDesc(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_array) { cudaArray_const_t __pyx_v_cyarray; PyObject *__pyx_v_parray = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_desc = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetChannelDesc", 0); /* "cuda/bindings/runtime.pyx":24895 * """ * cdef cyruntime.cudaArray_const_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif isinstance(array, (cudaArray_const_t,)): */ __pyx_t_1 = (__pyx_v_array == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24896 * cdef cyruntime.cudaArray_const_t cyarray * if array is None: * parray = 0 # <<<<<<<<<<<<<< * elif isinstance(array, (cudaArray_const_t,)): * parray = int(array) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_parray = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":24895 * """ * cdef cyruntime.cudaArray_const_t cyarray * if array is None: # <<<<<<<<<<<<<< * parray = 0 * elif isinstance(array, (cudaArray_const_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24897 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_array, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24898 * parray = 0 * elif isinstance(array, (cudaArray_const_t,)): * 
parray = int(array) # <<<<<<<<<<<<<< * else: * parray = int(cudaArray_const_t(array)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_array); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_parray = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":24897 * if array is None: * parray = 0 * elif isinstance(array, (cudaArray_const_t,)): # <<<<<<<<<<<<<< * parray = int(array) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":24900 * parray = int(array) * else: * parray = int(cudaArray_const_t(array)) # <<<<<<<<<<<<<< * cyarray = parray * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaArray_const_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_array}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24900, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24900, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_parray = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":24901 * else: * parray = int(cudaArray_const_t(array)) * cyarray = parray # <<<<<<<<<<<<<< * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_parray); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 24901, __pyx_L1_error) __pyx_v_cyarray = 
((cudaArray_const_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":24902 * parray = int(cudaArray_const_t(array)) * cyarray = parray * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetChannelDesc(desc._pvt_ptr, cyarray) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24902, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_desc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":24903 * cyarray = parray * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetChannelDesc(desc._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24904 * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() * with nogil: * err = cyruntime.cudaGetChannelDesc(desc._pvt_ptr, cyarray) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetChannelDesc(((struct cudaChannelFormatDesc *)__pyx_v_desc->_pvt_ptr), __pyx_v_cyarray); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24904, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* 
"cuda/bindings/runtime.pyx":24903 * cyarray = parray * cdef cudaChannelFormatDesc desc = cudaChannelFormatDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetChannelDesc(desc._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":24905 * with nogil: * err = cyruntime.cudaGetChannelDesc(desc._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], desc) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":24906 * err = cyruntime.cudaGetChannelDesc(desc._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], desc) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24906, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 24906, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 24906, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* 
"cuda/bindings/runtime.pyx":24905 * with nogil: * err = cyruntime.cudaGetChannelDesc(desc._pvt_ptr, cyarray) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], desc) */ } /* "cuda/bindings/runtime.pyx":24907 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], desc) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 24907, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_desc); __Pyx_GIVEREF((PyObject *)__pyx_v_desc); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_desc)) != (0)) __PYX_ERR(0, 24907, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24872 * return (_dict_cudaError_t[err], mipmappedArray) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetChannelDesc(array): * """ Get the channel descriptor of an array. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetChannelDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_parray); __Pyx_XDECREF((PyObject *)__pyx_v_desc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24909 * return (_dict_cudaError_t[err], desc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaCreateChannelDesc(int x, int y, int z, int w, f not None : cudaChannelFormatKind): * """ Returns a channel descriptor using the specified format. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_349cudaCreateChannelDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_348cudaCreateChannelDesc, "cudaCreateChannelDesc(int x, int y, int z, int w, f: cudaChannelFormatKind)\n\nReturns a channel descriptor using the specified format.\n\nReturns a channel descriptor with format `f` and number of bits of each\ncomponent `x`, `y`, `z`, and `w`. 
The :py:obj:`~.cudaChannelFormatDesc`\nis defined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere :py:obj:`~.cudaChannelFormatKind` is one of\n:py:obj:`~.cudaChannelFormatKindSigned`,\n:py:obj:`~.cudaChannelFormatKindUnsigned`, or\n:py:obj:`~.cudaChannelFormatKindFloat`.\n\nParameters\n----------\nx : int\n X component\ny : int\n Y component\nz : int\n Z component\nw : int\n W component\nf : :py:obj:`~.cudaChannelFormatKind`\n Channel format\n\nReturns\n-------\ncudaError_t.cudaSuccess\n cudaError_t.cudaSuccess\n:py:obj:`~.cudaChannelFormatDesc`\n Channel descriptor with format `f`\n\nSee Also\n--------\ncudaCreateChannelDesc (C++ API), :py:obj:`~.cudaGetChannelDesc`, :py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cudaCreateSurfaceObject`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_349cudaCreateChannelDesc = {"cudaCreateChannelDesc", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_349cudaCreateChannelDesc, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_348cudaCreateChannelDesc}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_349cudaCreateChannelDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { int __pyx_v_x; int __pyx_v_y; int __pyx_v_z; int __pyx_v_w; PyObject *__pyx_v_f = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[5] = {0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaCreateChannelDesc (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif 
#endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_x_2,&__pyx_mstate_global->__pyx_n_u_y_2,&__pyx_mstate_global->__pyx_n_u_z_2,&__pyx_mstate_global->__pyx_n_u_w_2,&__pyx_mstate_global->__pyx_n_u_f_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24909, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 24909, __pyx_L3_error) CYTHON_FALLTHROUGH; case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 24909, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24909, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24909, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24909, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaCreateChannelDesc", 0) < (0)) __PYX_ERR(0, 24909, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 5; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaCreateChannelDesc", 1, 5, 5, i); __PYX_ERR(0, 24909, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 5)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && 
unlikely(!values[0])) __PYX_ERR(0, 24909, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24909, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24909, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 24909, __pyx_L3_error) values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 24909, __pyx_L3_error) } __pyx_v_x = __Pyx_PyLong_As_int(values[0]); if (unlikely((__pyx_v_x == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24910, __pyx_L3_error) __pyx_v_y = __Pyx_PyLong_As_int(values[1]); if (unlikely((__pyx_v_y == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24910, __pyx_L3_error) __pyx_v_z = __Pyx_PyLong_As_int(values[2]); if (unlikely((__pyx_v_z == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24910, __pyx_L3_error) __pyx_v_w = __Pyx_PyLong_As_int(values[3]); if (unlikely((__pyx_v_w == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24910, __pyx_L3_error) __pyx_v_f = values[4]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaCreateChannelDesc", 1, 5, 5, __pyx_nargs); __PYX_ERR(0, 24909, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaCreateChannelDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_f) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "f"); __PYX_ERR(0, 24910, __pyx_L1_error) } __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_348cudaCreateChannelDesc(__pyx_self, __pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_w, __pyx_v_f); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_348cudaCreateChannelDesc(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_x, int __pyx_v_y, int __pyx_v_z, int __pyx_v_w, PyObject *__pyx_v_f) { enum cudaChannelFormatKind __pyx_v_cyf; struct cudaChannelFormatDesc __pyx_v_err; struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *__pyx_v_wrapper = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; enum cudaChannelFormatKind __pyx_t_2; struct cudaChannelFormatDesc __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaCreateChannelDesc", 0); /* "cuda/bindings/runtime.pyx":24948 * cudaCreateChannelDesc (C++ API), :py:obj:`~.cudaGetChannelDesc`, :py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cudaCreateSurfaceObject` * """ * cdef cyruntime.cudaChannelFormatKind cyf = f.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaCreateChannelDesc(x, y, z, w, cyf) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_f, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24948, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = ((enum cudaChannelFormatKind)__Pyx_PyLong_As_enum__cudaChannelFormatKind(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 24948, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cyf = __pyx_t_2; /* "cuda/bindings/runtime.pyx":24949 * """ * cdef cyruntime.cudaChannelFormatKind cyf = f.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaCreateChannelDesc(x, y, z, w, cyf) * cdef cudaChannelFormatDesc wrapper = cudaChannelFormatDesc() */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":24950 * cdef cyruntime.cudaChannelFormatKind cyf = f.value * with nogil: * err = cyruntime.cudaCreateChannelDesc(x, y, z, w, cyf) # <<<<<<<<<<<<<< * cdef cudaChannelFormatDesc wrapper = cudaChannelFormatDesc() * wrapper._pvt_ptr[0] = err */ __pyx_t_3 = __pyx_f_4cuda_8bindings_9cyruntime_cudaCreateChannelDesc(__pyx_v_x, __pyx_v_y, __pyx_v_z, __pyx_v_w, __pyx_v_cyf); if (unlikely(__Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 24950, __pyx_L4_error) __pyx_v_err = __pyx_t_3; } /* "cuda/bindings/runtime.pyx":24949 * """ * cdef cyruntime.cudaChannelFormatKind cyf = f.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaCreateChannelDesc(x, y, z, w, cyf) * cdef cudaChannelFormatDesc wrapper = cudaChannelFormatDesc() */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":24951 * with nogil: * err = cyruntime.cudaCreateChannelDesc(x, y, z, w, cyf) * cdef cudaChannelFormatDesc wrapper = cudaChannelFormatDesc() # <<<<<<<<<<<<<< * wrapper._pvt_ptr[0] = err * return (cudaError_t.cudaSuccess, wrapper) */ __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaChannelFormatDesc); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) 
| (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24951, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_wrapper = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaChannelFormatDesc *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":24952 * err = cyruntime.cudaCreateChannelDesc(x, y, z, w, cyf) * cdef cudaChannelFormatDesc wrapper = cudaChannelFormatDesc() * wrapper._pvt_ptr[0] = err # <<<<<<<<<<<<<< * return (cudaError_t.cudaSuccess, wrapper) * */ (__pyx_v_wrapper->_pvt_ptr[0]) = __pyx_v_err; /* "cuda/bindings/runtime.pyx":24953 * cdef cudaChannelFormatDesc wrapper = cudaChannelFormatDesc() * wrapper._pvt_ptr[0] = err * return (cudaError_t.cudaSuccess, wrapper) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24953, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_cudaSuccess); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24953, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24953, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 24953, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_wrapper); __Pyx_GIVEREF((PyObject *)__pyx_v_wrapper); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_v_wrapper)) != (0)) __PYX_ERR(0, 24953, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24909 * return (_dict_cudaError_t[err], desc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaCreateChannelDesc(int x, int y, int z, int w, f not None : 
cudaChannelFormatKind): * """ Returns a channel descriptor using the specified format. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaCreateChannelDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_wrapper); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":24955 * return (cudaError_t.cudaSuccess, wrapper) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaCreateTextureObject(pResDesc : Optional[cudaResourceDesc], pTexDesc : Optional[cudaTextureDesc], pResViewDesc : Optional[cudaResourceViewDesc]): * """ Creates a texture object. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_351cudaCreateTextureObject(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_350cudaCreateTextureObject, "cudaCreateTextureObject(cudaResourceDesc pResDesc: Optional[cudaResourceDesc], cudaTextureDesc pTexDesc: Optional[cudaTextureDesc], cudaResourceViewDesc pResViewDesc: Optional[cudaResourceViewDesc])\n\nCreates a texture object.\n\nCreates a texture object and returns it in `pTexObject`. `pResDesc`\ndescribes the data to texture from. `pTexDesc` describes how the data\nshould be sampled. `pResViewDesc` is an optional argument that\nspecifies an alternate format for the data described by `pResDesc`, and\nalso describes the subresource region to restrict access to when\ntexturing. `pResViewDesc` can only be specified if the type of resource\nis a CUDA array or a CUDA mipmapped array not in a block compressed\nformat.\n\nTexture objects are only supported on devices of compute capability 3.0\nor higher. 
Additionally, a texture object is an opaque value, and, as\nsuch, should only be accessed through CUDA API calls.\n\nThe :py:obj:`~.cudaResourceDesc` structure is defined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere:\n\n- :py:obj:`~.cudaResourceDesc.resType` specifies the type of resource\n to texture from. CUresourceType is defined as:\n\n- **View CUDA Toolkit Documentation for a C++ code example**\n\nIf :py:obj:`~.cudaResourceDesc.resType` is set to\n:py:obj:`~.cudaResourceTypeArray`,\n:py:obj:`~.cudaResourceDesc`::res::array::array must be set to a valid\nCUDA array handle.\n\nIf :py:obj:`~.cudaResourceDesc.resType` is set to\n:py:obj:`~.cudaResourceTypeMipmappedArray`,\n:py:obj:`~.cudaResourceDesc`::res::mipmap::mipmap must be set to a\nvalid CUDA mipmapped array handle and\n:py:obj:`~.cudaTextureDesc.normalizedCoords` must be set to true.\n\nIf :py:obj:`~.cudaResourceDesc.resType` is set to\n:py:obj:`~.cudaResourceTypeLinear`,\n:py:obj:`~.cudaResourceDesc`::res::linear::devPtr must be set to a\nvalid device pointer, that is aligned to\n:py:obj:`~.cudaDeviceProp.textureAlignment`.\n:py:obj:`~.cudaResourceDesc`::res::linear::desc describ""es the format\nand the number of components per array element.\n:py:obj:`~.cudaResourceDesc`::res::linear::sizeInBytes specifies the\nsize of the array in bytes. The total number of elements in the linear\naddress range cannot exceed\n:py:obj:`~.cudaDeviceProp.maxTexture1DLinear`. 
The number of elements\nis computed as (sizeInBytes / sizeof(desc)).\n\nIf :py:obj:`~.cudaResourceDesc.resType` is set to\n:py:obj:`~.cudaResourceTypePitch2D`,\n:py:obj:`~.cudaResourceDesc`::res::pitch2D::devPtr must be set to a\nvalid device pointer, that is aligned to\n:py:obj:`~.cudaDeviceProp.textureAlignment`.\n:py:obj:`~.cudaResourceDesc`::res::pitch2D::desc describes the format\nand the number of components per array element.\n:py:obj:`~.cudaResourceDesc`::res::pitch2D::width and\n:py:obj:`~.cudaResourceDesc`::res::pitch2D::height specify the width\nand height of the array in elements, and cannot exceed\n:py:obj:`~.cudaDeviceProp.maxTexture2DLinear`[0] and\n:py:obj:`~.cudaDeviceProp.maxTexture2DLinear`[1] respectively.\n:py:obj:`~.cudaResourceDesc`::res::pitch2D::pitchInBytes specifies the\npitch between two rows in bytes and has to be aligned to\n:py:obj:`~.cudaDeviceProp.texturePitchAlignment`. Pitch cannot exceed\n:py:obj:`~.cudaDeviceProp.maxTexture2DLinear`[2].\n\nThe :py:obj:`~.cudaTextureDesc` struct is defined as\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere\n\n- :py:obj:`~.cudaTextureDesc.addressMode` specifies the addressing mode\n for each dimension of the texture data.\n :py:obj:`~.cudaTextureAddressMode` is defined as:\n\n- **View CUDA Toolkit Documentation for a C++ code example**\n\n- This is ignored if :py:obj:`~.cudaResourceDesc.resType` is\n :py:obj:`~.cudaResourceTypeLinear`. 
Also, if\n :py:obj:`~.cudaTextureDesc.normalizedCoords` is set to zero,\n :py:obj:`~.cudaAddressModeWrap` and :py:obj:`~.cudaAddressModeMirror`\n won't be supported and will be switched to\n :py:obj:`~.cudaAddressModeClamp`.\n\n- :py:obj:`~.cudaTextureDesc.filt""erMode` specifies the filtering mode\n to be used when fetching from the texture.\n :py:obj:`~.cudaTextureFilterMode` is defined as:\n\n- **View CUDA Toolkit Documentation for a C++ code example**\n\n- This is ignored if :py:obj:`~.cudaResourceDesc.resType` is\n :py:obj:`~.cudaResourceTypeLinear`.\n\n- :py:obj:`~.cudaTextureDesc.readMode` specifies whether integer data\n should be converted to floating point or not.\n :py:obj:`~.cudaTextureReadMode` is defined as:\n\n- **View CUDA Toolkit Documentation for a C++ code example**\n\n- Note that this applies only to 8-bit and 16-bit integer formats.\n 32-bit integer format would not be promoted, regardless of whether or\n not this :py:obj:`~.cudaTextureDesc.readMode` is set\n :py:obj:`~.cudaReadModeNormalizedFloat` is specified.\n\n- :py:obj:`~.cudaTextureDesc.sRGB` specifies whether sRGB to linear\n conversion should be performed during texture fetch.\n\n- :py:obj:`~.cudaTextureDesc.borderColor` specifies the float values of\n color. where: :py:obj:`~.cudaTextureDesc.borderColor`[0] contains\n value of 'R', :py:obj:`~.cudaTextureDesc.borderColor`[1] contains\n value of 'G', :py:obj:`~.cudaTextureDesc.borderColor`[2] contains\n value of 'B', :py:obj:`~.cudaTextureDesc.borderColor`[3] contains\n value of 'A' Note that application using integer border color values\n will need to these values to float. 
The values are\n set only when the addressing mode specified by\n :py:obj:`~.cudaTextureDesc.addressMode` is cudaAddressModeBorder.\n\n- :py:obj:`~.cudaTextureDesc.normalizedCoords` specifies whether the\n texture coordinates will be normalized or not.\n\n- :py:obj:`~.cudaTextureDesc.maxAnisotropy` specifies the maximum\n anistropy ratio to be used when doing anisotropic filtering. This\n value will be clamped to the range [1,16].\n\n- :py:obj:`~.cudaTextureDesc.mipmapFilterMode` specifies the filter\n mode when the calculated mipmap level lies between two defined mipmap\n levels.""\n\n- :py:obj:`~.cudaTextureDesc.mipmapLevelBias` specifies the offset to\n be applied to the calculated mipmap level.\n\n- :py:obj:`~.cudaTextureDesc.minMipmapLevelClamp` specifies the lower\n end of the mipmap level range to clamp access to.\n\n- :py:obj:`~.cudaTextureDesc.maxMipmapLevelClamp` specifies the upper\n end of the mipmap level range to clamp access to.\n\n- :py:obj:`~.cudaTextureDesc.disableTrilinearOptimization` specifies\n whether the trilinear filtering optimizations will be disabled.\n\n- :py:obj:`~.cudaTextureDesc.seamlessCubemap` specifies whether\n seamless cube map filtering is enabled. This flag can only be\n specified if the underlying resource is a CUDA array or a CUDA\n mipmapped array that was created with the flag\n :py:obj:`~.cudaArrayCubemap`. When seamless cube map filtering is\n enabled, texture address modes specified by\n :py:obj:`~.cudaTextureDesc.addressMode` are ignored. 
Instead, if the\n :py:obj:`~.cudaTextureDesc.filterMode` is set to\n :py:obj:`~.cudaFilterModePoint` the address mode\n :py:obj:`~.cudaAddressModeClamp` will be applied for all dimensions.\n If the :py:obj:`~.cudaTextureDesc.filterMode` is set to\n :py:obj:`~.cudaFilterModeLinear` seamless cube map filtering will be\n performed when sampling along the cube face borders.\n\nThe :py:obj:`~.cudaResourceViewDesc` struct is defined as\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nwhere:\n\n- :py:obj:`~.cudaResourceViewDesc.format` specifies how the data\n contained in the CUDA array or CUDA mipmapped array should be\n interpreted. Note that this can incur a change in size of the texture\n data. If the resource view format is a block compressed format, then\n the underlying CUDA array or CUDA mipmapped array has to have a\n 32-bit unsigned integer format with 2 or 4 channels, depending on the\n block compressed format. For ex., BC1 and BC4 require the underlying\n CUDA array to have a 32-bit unsigned int with 2 channels. The ot""her\n BC formats require the underlying resource to have the same 32-bit\n unsigned int format but with 4 channels.\n\n- :py:obj:`~.cudaResourceViewDesc.width` specifies the new width of the\n texture data. If the resource view format is a block compressed\n format, this value has to be 4 times the original width of the\n resource. For non block compressed formats, this value has to be\n equal to that of the original resource.\n\n- :py:obj:`~.cudaResourceViewDesc.height` specifies the new height of\n the texture data. If the resource view format is a block compressed\n format, this value has to be 4 times the original height of the\n resource. For non block compressed formats, this value has to be\n equal to that of the original resource.\n\n- :py:obj:`~.cudaResourceViewDesc.depth` specifies the new depth of the\n texture data. 
This value has to be equal to that of the original\n resource.\n\n- :py:obj:`~.cudaResourceViewDesc.firstMipmapLevel` specifies the most\n detailed mipmap level. This will be the new mipmap level zero. For\n non-mipmapped resources, this value has to be\n zero.:py:obj:`~.cudaTextureDesc.minMipmapLevelClamp` and\n :py:obj:`~.cudaTextureDesc.maxMipmapLevelClamp` will be relative to\n this value. For ex., if the firstMipmapLevel is set to 2, and a\n minMipmapLevelClamp of 1.2 is specified, then the actual minimum\n mipmap level clamp will be 3.2.\n\n- :py:obj:`~.cudaResourceViewDesc.lastMipmapLevel` specifies the least\n detailed mipmap level. For non-mipmapped resources, this value has to\n be zero.\n\n- :py:obj:`~.cudaResourceViewDesc.firstLayer` specifies the first layer\n index for layered textures. This will be the new layer zero. For non-\n layered resources, this value has to be zero.\n\n- :py:obj:`~.cudaResourceViewDesc.lastLayer` specifies the last layer\n index for layered textures. 
For non-layered resources, this value has\n to be zero.\n\nParameters\n----------\npResDesc : :py:obj:`~.cudaResourceDesc`\n Re""source descriptor\npTexDesc : :py:obj:`~.cudaTextureDesc`\n Texture descriptor\npResViewDesc : :py:obj:`~.cudaResourceViewDesc`\n Resource view descriptor\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npTexObject : :py:obj:`~.cudaTextureObject_t`\n Texture object to create\n\nSee Also\n--------\n:py:obj:`~.cudaDestroyTextureObject`, :py:obj:`~.cuTexObjectCreate`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_351cudaCreateTextureObject = {"cudaCreateTextureObject", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_351cudaCreateTextureObject, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_350cudaCreateTextureObject}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_351cudaCreateTextureObject(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *__pyx_v_pResDesc = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_pTexDesc = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceViewDesc *__pyx_v_pResViewDesc = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaCreateTextureObject (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, 
__pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pResDesc,&__pyx_mstate_global->__pyx_n_u_pTexDesc,&__pyx_mstate_global->__pyx_n_u_pResViewDesc,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 24955, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24955, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24955, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24955, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaCreateTextureObject", 0) < (0)) __PYX_ERR(0, 24955, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaCreateTextureObject", 1, 3, 3, i); __PYX_ERR(0, 24955, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 24955, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 24955, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 24955, __pyx_L3_error) } __pyx_v_pResDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *)values[0]); __pyx_v_pTexDesc = 
((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)values[1]); __pyx_v_pResViewDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceViewDesc *)values[2]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaCreateTextureObject", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 24955, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaCreateTextureObject", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pResDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaResourceDesc, 1, "pResDesc", 0))) __PYX_ERR(0, 24956, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pTexDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureDesc, 1, "pTexDesc", 0))) __PYX_ERR(0, 24956, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pResViewDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaResourceViewDesc, 1, "pResViewDesc", 0))) __PYX_ERR(0, 24956, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_350cudaCreateTextureObject(__pyx_self, __pyx_v_pResDesc, __pyx_v_pTexDesc, __pyx_v_pResViewDesc); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_4cuda_8bindings_7runtime_350cudaCreateTextureObject(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *__pyx_v_pResDesc, struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_pTexDesc, struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceViewDesc *__pyx_v_pResViewDesc) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *__pyx_v_pTexObject = 0; struct cudaResourceDesc *__pyx_v_cypResDesc_ptr; struct cudaTextureDesc *__pyx_v_cypTexDesc_ptr; struct cudaResourceViewDesc *__pyx_v_cypResViewDesc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; struct cudaResourceDesc *__pyx_t_5; int __pyx_t_6; struct cudaTextureDesc *__pyx_t_7; struct cudaResourceViewDesc *__pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaCreateTextureObject", 0); /* "cuda/bindings/runtime.pyx":25187 * :py:obj:`~.cudaDestroyTextureObject`, :py:obj:`~.cuTexObjectCreate` * """ * cdef cudaTextureObject_t pTexObject = cudaTextureObject_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._pvt_ptr if pResDesc is not None else NULL * cdef cyruntime.cudaTextureDesc* cypTexDesc_ptr = pTexDesc._pvt_ptr if pTexDesc is not None else NULL */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) 
__PYX_ERR(0, 25187, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pTexObject = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureObject_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":25188 * """ * cdef cudaTextureObject_t pTexObject = cudaTextureObject_t() * cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._pvt_ptr if pResDesc is not None else NULL # <<<<<<<<<<<<<< * cdef cyruntime.cudaTextureDesc* cypTexDesc_ptr = pTexDesc._pvt_ptr if pTexDesc is not None else NULL * cdef cyruntime.cudaResourceViewDesc* cypResViewDesc_ptr = pResViewDesc._pvt_ptr if pResViewDesc is not None else NULL */ __pyx_t_6 = (((PyObject *)__pyx_v_pResDesc) != Py_None); if (__pyx_t_6) { __pyx_t_5 = __pyx_v_pResDesc->_pvt_ptr; } else { __pyx_t_5 = NULL; } __pyx_v_cypResDesc_ptr = __pyx_t_5; /* "cuda/bindings/runtime.pyx":25189 * cdef cudaTextureObject_t pTexObject = cudaTextureObject_t() * cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._pvt_ptr if pResDesc is not None else NULL * cdef cyruntime.cudaTextureDesc* cypTexDesc_ptr = pTexDesc._pvt_ptr if pTexDesc is not None else NULL # <<<<<<<<<<<<<< * cdef cyruntime.cudaResourceViewDesc* cypResViewDesc_ptr = pResViewDesc._pvt_ptr if pResViewDesc is not None else NULL * with nogil: */ __pyx_t_6 = (((PyObject *)__pyx_v_pTexDesc) != Py_None); if (__pyx_t_6) { __pyx_t_7 = __pyx_v_pTexDesc->_pvt_ptr; } else { __pyx_t_7 = NULL; } __pyx_v_cypTexDesc_ptr = __pyx_t_7; /* "cuda/bindings/runtime.pyx":25190 * cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._pvt_ptr if pResDesc is not None else NULL * cdef cyruntime.cudaTextureDesc* cypTexDesc_ptr = pTexDesc._pvt_ptr if pTexDesc is not None else NULL * cdef cyruntime.cudaResourceViewDesc* cypResViewDesc_ptr = pResViewDesc._pvt_ptr if pResViewDesc is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaCreateTextureObject(pTexObject._pvt_ptr, cypResDesc_ptr, cypTexDesc_ptr, cypResViewDesc_ptr) */ __pyx_t_6 = (((PyObject 
*)__pyx_v_pResViewDesc) != Py_None); if (__pyx_t_6) { __pyx_t_8 = __pyx_v_pResViewDesc->_pvt_ptr; } else { __pyx_t_8 = NULL; } __pyx_v_cypResViewDesc_ptr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":25191 * cdef cyruntime.cudaTextureDesc* cypTexDesc_ptr = pTexDesc._pvt_ptr if pTexDesc is not None else NULL * cdef cyruntime.cudaResourceViewDesc* cypResViewDesc_ptr = pResViewDesc._pvt_ptr if pResViewDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaCreateTextureObject(pTexObject._pvt_ptr, cypResDesc_ptr, cypTexDesc_ptr, cypResViewDesc_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25192 * cdef cyruntime.cudaResourceViewDesc* cypResViewDesc_ptr = pResViewDesc._pvt_ptr if pResViewDesc is not None else NULL * with nogil: * err = cyruntime.cudaCreateTextureObject(pTexObject._pvt_ptr, cypResDesc_ptr, cypTexDesc_ptr, cypResViewDesc_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaCreateTextureObject(((cudaTextureObject_t *)__pyx_v_pTexObject->_pvt_ptr), __pyx_v_cypResDesc_ptr, __pyx_v_cypTexDesc_ptr, __pyx_v_cypResViewDesc_ptr); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25192, __pyx_L4_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":25191 * cdef cyruntime.cudaTextureDesc* cypTexDesc_ptr = pTexDesc._pvt_ptr if pTexDesc is not None else NULL * cdef cyruntime.cudaResourceViewDesc* cypResViewDesc_ptr = pResViewDesc._pvt_ptr if pResViewDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaCreateTextureObject(pTexObject._pvt_ptr, cypResDesc_ptr, cypTexDesc_ptr, cypResViewDesc_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } 
__pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":25193 * with nogil: * err = cyruntime.cudaCreateTextureObject(pTexObject._pvt_ptr, cypResDesc_ptr, cypTexDesc_ptr, cypResViewDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pTexObject) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":25194 * err = cyruntime.cudaCreateTextureObject(pTexObject._pvt_ptr, cypResDesc_ptr, cypTexDesc_ptr, cypResViewDesc_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pTexObject) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25194, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25194, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25194, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25194, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 25194, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 25194, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25193 * with nogil: * err = cyruntime.cudaCreateTextureObject(pTexObject._pvt_ptr, cypResDesc_ptr, cypTexDesc_ptr, cypResViewDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * 
return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pTexObject) */ } /* "cuda/bindings/runtime.pyx":25195 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pTexObject) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25195, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 25195, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pTexObject); __Pyx_GIVEREF((PyObject *)__pyx_v_pTexObject); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pTexObject)) != (0)) __PYX_ERR(0, 25195, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":24955 * return (cudaError_t.cudaSuccess, wrapper) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaCreateTextureObject(pResDesc : Optional[cudaResourceDesc], pTexDesc : Optional[cudaTextureDesc], pResViewDesc : Optional[cudaResourceViewDesc]): * """ Creates a texture object. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaCreateTextureObject", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pTexObject); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25197 * return (_dict_cudaError_t[err], pTexObject) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDestroyTextureObject(texObject): * """ Destroys a texture object. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_353cudaDestroyTextureObject(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_352cudaDestroyTextureObject, "cudaDestroyTextureObject(texObject)\n\nDestroys a texture object.\n\nDestroys the texture object specified by `texObject`.\n\nParameters\n----------\ntexObject : :py:obj:`~.cudaTextureObject_t`\n Texture object to destroy\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cuTexObjectDestroy`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_353cudaDestroyTextureObject = {"cudaDestroyTextureObject", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_353cudaDestroyTextureObject, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_352cudaDestroyTextureObject}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_353cudaDestroyTextureObject(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject 
*__pyx_v_texObject = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDestroyTextureObject (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_texObject,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25197, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25197, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDestroyTextureObject", 0) < (0)) __PYX_ERR(0, 25197, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDestroyTextureObject", 1, 1, 1, i); __PYX_ERR(0, 25197, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25197, __pyx_L3_error) } __pyx_v_texObject = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDestroyTextureObject", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25197, __pyx_L3_error) 
__pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDestroyTextureObject", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_352cudaDestroyTextureObject(__pyx_self, __pyx_v_texObject); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_352cudaDestroyTextureObject(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_texObject) { cudaTextureObject_t __pyx_v_cytexObject; PyObject *__pyx_v_ptexObject = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDestroyTextureObject", 0); /* "cuda/bindings/runtime.pyx":25218 * """ * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: # <<<<<<<<<<<<<< * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): */ __pyx_t_1 = (__pyx_v_texObject == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25219 * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: * ptexObject = 0 # <<<<<<<<<<<<<< * elif isinstance(texObject, (cudaTextureObject_t,)): * ptexObject = int(texObject) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_ptexObject = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25218 
* """ * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: # <<<<<<<<<<<<<< * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25220 * if texObject is None: * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): # <<<<<<<<<<<<<< * ptexObject = int(texObject) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_texObject, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25221 * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): * ptexObject = int(texObject) # <<<<<<<<<<<<<< * else: * ptexObject = int(cudaTextureObject_t(texObject)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_texObject); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_ptexObject = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":25220 * if texObject is None: * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): # <<<<<<<<<<<<<< * ptexObject = int(texObject) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25223 * ptexObject = int(texObject) * else: * ptexObject = int(cudaTextureObject_t(texObject)) # <<<<<<<<<<<<<< * cytexObject = ptexObject * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_texObject}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25223, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = 
__Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_ptexObject = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":25224 * else: * ptexObject = int(cudaTextureObject_t(texObject)) * cytexObject = ptexObject # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDestroyTextureObject(cytexObject) */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_ptexObject); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25224, __pyx_L1_error) __pyx_v_cytexObject = ((cudaTextureObject_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":25225 * ptexObject = int(cudaTextureObject_t(texObject)) * cytexObject = ptexObject * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDestroyTextureObject(cytexObject) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25226 * cytexObject = ptexObject * with nogil: * err = cyruntime.cudaDestroyTextureObject(cytexObject) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDestroyTextureObject(__pyx_v_cytexObject); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25226, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":25225 * ptexObject = int(cudaTextureObject_t(texObject)) * cytexObject = ptexObject * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDestroyTextureObject(cytexObject) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* 
"cuda/bindings/runtime.pyx":25227 * with nogil: * err = cyruntime.cudaDestroyTextureObject(cytexObject) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 25227, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25197 * return (_dict_cudaError_t[err], pTexObject) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDestroyTextureObject(texObject): * """ Destroys a texture object. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDestroyTextureObject", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_ptexObject); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25229 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetTextureObjectResourceDesc(texObject): * """ Returns a texture object's resource descriptor. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_355cudaGetTextureObjectResourceDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_354cudaGetTextureObjectResourceDesc, "cudaGetTextureObjectResourceDesc(texObject)\n\nReturns a texture object's resource descriptor.\n\nReturns the resource descriptor for the texture object specified by\n`texObject`.\n\nParameters\n----------\ntexObject : :py:obj:`~.cudaTextureObject_t`\n Texture object\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npResDesc : :py:obj:`~.cudaResourceDesc`\n Resource descriptor\n\nSee Also\n--------\n:py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cuTexObjectGetResourceDesc`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_355cudaGetTextureObjectResourceDesc = {"cudaGetTextureObjectResourceDesc", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_355cudaGetTextureObjectResourceDesc, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_354cudaGetTextureObjectResourceDesc}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_355cudaGetTextureObjectResourceDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_texObject = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetTextureObjectResourceDesc (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE 
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_texObject,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25229, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25229, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGetTextureObjectResourceDesc", 0) < (0)) __PYX_ERR(0, 25229, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGetTextureObjectResourceDesc", 1, 1, 1, i); __PYX_ERR(0, 25229, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25229, __pyx_L3_error) } __pyx_v_texObject = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGetTextureObjectResourceDesc", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25229, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetTextureObjectResourceDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; 
__pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_354cudaGetTextureObjectResourceDesc(__pyx_self, __pyx_v_texObject); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_354cudaGetTextureObjectResourceDesc(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_texObject) { cudaTextureObject_t __pyx_v_cytexObject; PyObject *__pyx_v_ptexObject = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *__pyx_v_pResDesc = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetTextureObjectResourceDesc", 0); /* "cuda/bindings/runtime.pyx":25253 * """ * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: # <<<<<<<<<<<<<< * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): */ __pyx_t_1 = (__pyx_v_texObject == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25254 * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: * ptexObject = 0 # <<<<<<<<<<<<<< * elif isinstance(texObject, (cudaTextureObject_t,)): * ptexObject = int(texObject) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_ptexObject = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25253 * """ * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: # <<<<<<<<<<<<<< * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25255 * if texObject is None: * ptexObject = 0 * 
elif isinstance(texObject, (cudaTextureObject_t,)): # <<<<<<<<<<<<<< * ptexObject = int(texObject) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_texObject, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25256 * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): * ptexObject = int(texObject) # <<<<<<<<<<<<<< * else: * ptexObject = int(cudaTextureObject_t(texObject)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_texObject); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25256, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_ptexObject = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":25255 * if texObject is None: * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): # <<<<<<<<<<<<<< * ptexObject = int(texObject) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25258 * ptexObject = int(texObject) * else: * ptexObject = int(cudaTextureObject_t(texObject)) # <<<<<<<<<<<<<< * cytexObject = ptexObject * cdef cudaResourceDesc pResDesc = cudaResourceDesc() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_texObject}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25258, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_ptexObject = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; 
} __pyx_L3:; /* "cuda/bindings/runtime.pyx":25259 * else: * ptexObject = int(cudaTextureObject_t(texObject)) * cytexObject = ptexObject # <<<<<<<<<<<<<< * cdef cudaResourceDesc pResDesc = cudaResourceDesc() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_ptexObject); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25259, __pyx_L1_error) __pyx_v_cytexObject = ((cudaTextureObject_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":25260 * ptexObject = int(cudaTextureObject_t(texObject)) * cytexObject = ptexObject * cdef cudaResourceDesc pResDesc = cudaResourceDesc() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetTextureObjectResourceDesc(pResDesc._pvt_ptr, cytexObject) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaResourceDesc); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaResourceDesc); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25260, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_pResDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":25261 * cytexObject = ptexObject * cdef cudaResourceDesc pResDesc = cudaResourceDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetTextureObjectResourceDesc(pResDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25262 * cdef cudaResourceDesc pResDesc = cudaResourceDesc() * with nogil: * err = 
cyruntime.cudaGetTextureObjectResourceDesc(pResDesc._pvt_ptr, cytexObject) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetTextureObjectResourceDesc(((struct cudaResourceDesc *)__pyx_v_pResDesc->_pvt_ptr), __pyx_v_cytexObject); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25262, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":25261 * cytexObject = ptexObject * cdef cudaResourceDesc pResDesc = cudaResourceDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetTextureObjectResourceDesc(pResDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":25263 * with nogil: * err = cyruntime.cudaGetTextureObjectResourceDesc(pResDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pResDesc) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25264 * err = cyruntime.cudaGetTextureObjectResourceDesc(pResDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pResDesc) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25264, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 25264, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 25264, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25263 * with nogil: * err = cyruntime.cudaGetTextureObjectResourceDesc(pResDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pResDesc) */ } /* "cuda/bindings/runtime.pyx":25265 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pResDesc) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25265, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 25265, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pResDesc); __Pyx_GIVEREF((PyObject *)__pyx_v_pResDesc); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject 
*)__pyx_v_pResDesc)) != (0)) __PYX_ERR(0, 25265, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25229 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetTextureObjectResourceDesc(texObject): * """ Returns a texture object's resource descriptor. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetTextureObjectResourceDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_ptexObject); __Pyx_XDECREF((PyObject *)__pyx_v_pResDesc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25267 * return (_dict_cudaError_t[err], pResDesc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetTextureObjectTextureDesc(texObject): * """ Returns a texture object's texture descriptor. 
*/ /* NOTE(review): Cython-generated Python wrapper for cudaGetTextureObjectTextureDesc. It parses the single 'texObject' argument (positional or keyword), raises TypeError via __Pyx_RaiseArgtupleInvalid on arity mismatch, then delegates to the generated implementation function below. Do not edit by hand -- regenerate from cuda/bindings/runtime.pyx. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_357cudaGetTextureObjectTextureDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_356cudaGetTextureObjectTextureDesc, "cudaGetTextureObjectTextureDesc(texObject)\n\nReturns a texture object's texture descriptor.\n\nReturns the texture descriptor for the texture object specified by\n`texObject`.\n\nParameters\n----------\ntexObject : :py:obj:`~.cudaTextureObject_t`\n Texture object\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npTexDesc : :py:obj:`~.cudaTextureDesc`\n Texture descriptor\n\nSee Also\n--------\n:py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cuTexObjectGetTextureDesc`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_357cudaGetTextureObjectTextureDesc = {"cudaGetTextureObjectTextureDesc", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_357cudaGetTextureObjectTextureDesc, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_356cudaGetTextureObjectTextureDesc}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_357cudaGetTextureObjectTextureDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_texObject = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetTextureObjectTextureDesc (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = 
PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_texObject,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25267, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25267, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGetTextureObjectTextureDesc", 0) < (0)) __PYX_ERR(0, 25267, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGetTextureObjectTextureDesc", 1, 1, 1, i); __PYX_ERR(0, 25267, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25267, __pyx_L3_error) } __pyx_v_texObject = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGetTextureObjectTextureDesc", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25267, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetTextureObjectTextureDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_356cudaGetTextureObjectTextureDesc(__pyx_self, __pyx_v_texObject); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_356cudaGetTextureObjectTextureDesc(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_texObject) { cudaTextureObject_t __pyx_v_cytexObject; PyObject *__pyx_v_ptexObject = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *__pyx_v_pTexDesc = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetTextureObjectTextureDesc", 0); /* "cuda/bindings/runtime.pyx":25291 * """ * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: # <<<<<<<<<<<<<< * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): */ __pyx_t_1 = (__pyx_v_texObject == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25292 * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: * ptexObject = 0 # <<<<<<<<<<<<<< * elif isinstance(texObject, (cudaTextureObject_t,)): * ptexObject = int(texObject) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_ptexObject = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25291 * """ * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: # <<<<<<<<<<<<<< * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25293 * if texObject is None: * ptexObject = 0 * elif isinstance(texObject, 
(cudaTextureObject_t,)): # <<<<<<<<<<<<<< * ptexObject = int(texObject) * else: */ /* NOTE(review): generated body of cudaGetTextureObjectTextureDesc -- normalizes texObject (None -> 0, cudaTextureObject_t -> int, other -> wrapped then int), converts to a C handle, heap-allocates a cudaTextureDesc Python wrapper, calls the cyruntime binding with the GIL released, and returns an (error, descriptor-or-None) tuple. Generated by Cython; fix issues in runtime.pyx, not here. */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_texObject, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25294 * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): * ptexObject = int(texObject) # <<<<<<<<<<<<<< * else: * ptexObject = int(cudaTextureObject_t(texObject)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_texObject); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_ptexObject = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":25293 * if texObject is None: * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): # <<<<<<<<<<<<<< * ptexObject = int(texObject) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25296 * ptexObject = int(texObject) * else: * ptexObject = int(cudaTextureObject_t(texObject)) # <<<<<<<<<<<<<< * cytexObject = ptexObject * cdef cudaTextureDesc pTexDesc = cudaTextureDesc() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_texObject}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25296, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25296, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_ptexObject = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* 
"cuda/bindings/runtime.pyx":25297 * else: * ptexObject = int(cudaTextureObject_t(texObject)) * cytexObject = ptexObject # <<<<<<<<<<<<<< * cdef cudaTextureDesc pTexDesc = cudaTextureDesc() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_ptexObject); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25297, __pyx_L1_error) __pyx_v_cytexObject = ((cudaTextureObject_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":25298 * ptexObject = int(cudaTextureObject_t(texObject)) * cytexObject = ptexObject * cdef cudaTextureDesc pTexDesc = cudaTextureDesc() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetTextureObjectTextureDesc(pTexDesc._pvt_ptr, cytexObject) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureDesc); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureDesc); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25298, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_pTexDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaTextureDesc *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":25299 * cytexObject = ptexObject * cdef cudaTextureDesc pTexDesc = cudaTextureDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetTextureObjectTextureDesc(pTexDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25300 * cdef cudaTextureDesc pTexDesc = cudaTextureDesc() * with nogil: * err = 
cyruntime.cudaGetTextureObjectTextureDesc(pTexDesc._pvt_ptr, cytexObject) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetTextureObjectTextureDesc(((struct cudaTextureDesc *)__pyx_v_pTexDesc->_pvt_ptr), __pyx_v_cytexObject); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25300, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":25299 * cytexObject = ptexObject * cdef cudaTextureDesc pTexDesc = cudaTextureDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetTextureObjectTextureDesc(pTexDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":25301 * with nogil: * err = cyruntime.cudaGetTextureObjectTextureDesc(pTexDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pTexDesc) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25302 * err = cyruntime.cudaGetTextureObjectTextureDesc(pTexDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pTexDesc) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25302, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25302, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25302, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25302, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 25302, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 25302, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25301 * with nogil: * err = cyruntime.cudaGetTextureObjectTextureDesc(pTexDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pTexDesc) */ } /* "cuda/bindings/runtime.pyx":25303 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pTexDesc) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25303, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 25303, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pTexDesc); __Pyx_GIVEREF((PyObject *)__pyx_v_pTexDesc); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject 
*)__pyx_v_pTexDesc)) != (0)) __PYX_ERR(0, 25303, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25267 * return (_dict_cudaError_t[err], pResDesc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetTextureObjectTextureDesc(texObject): * """ Returns a texture object's texture descriptor. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetTextureObjectTextureDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_ptexObject); __Pyx_XDECREF((PyObject *)__pyx_v_pTexDesc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25305 * return (_dict_cudaError_t[err], pTexDesc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetTextureObjectResourceViewDesc(texObject): * """ Returns a texture object's resource view descriptor. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_359cudaGetTextureObjectResourceViewDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_358cudaGetTextureObjectResourceViewDesc, "cudaGetTextureObjectResourceViewDesc(texObject)\n\nReturns a texture object's resource view descriptor.\n\nReturns the resource view descriptor for the texture object specified\nby `texObject`. 
If no resource view was specified,\n:py:obj:`~.cudaErrorInvalidValue` is returned.\n\nParameters\n----------\ntexObject : :py:obj:`~.cudaTextureObject_t`\n Texture object\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npResViewDesc : :py:obj:`~.cudaResourceViewDesc`\n Resource view descriptor\n\nSee Also\n--------\n:py:obj:`~.cudaCreateTextureObject`, :py:obj:`~.cuTexObjectGetResourceViewDesc`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_359cudaGetTextureObjectResourceViewDesc = {"cudaGetTextureObjectResourceViewDesc", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_359cudaGetTextureObjectResourceViewDesc, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_358cudaGetTextureObjectResourceViewDesc}; /* NOTE(review): Cython-generated wrapper for cudaGetTextureObjectResourceViewDesc; identical structure to the other texture-object query wrappers in this file (parse one 'texObject' argument, delegate to the generated impl). Regenerate from runtime.pyx instead of editing. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_359cudaGetTextureObjectResourceViewDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_texObject = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetTextureObjectResourceViewDesc (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_texObject,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25305, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25305, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGetTextureObjectResourceViewDesc", 0) < (0)) __PYX_ERR(0, 25305, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGetTextureObjectResourceViewDesc", 1, 1, 1, i); __PYX_ERR(0, 25305, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25305, __pyx_L3_error) } __pyx_v_texObject = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGetTextureObjectResourceViewDesc", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25305, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetTextureObjectResourceViewDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_358cudaGetTextureObjectResourceViewDesc(__pyx_self, __pyx_v_texObject); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return 
__pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_358cudaGetTextureObjectResourceViewDesc(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_texObject) { cudaTextureObject_t __pyx_v_cytexObject; PyObject *__pyx_v_ptexObject = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceViewDesc *__pyx_v_pResViewDesc = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetTextureObjectResourceViewDesc", 0); /* "cuda/bindings/runtime.pyx":25330 * """ * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: # <<<<<<<<<<<<<< * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): */ __pyx_t_1 = (__pyx_v_texObject == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25331 * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: * ptexObject = 0 # <<<<<<<<<<<<<< * elif isinstance(texObject, (cudaTextureObject_t,)): * ptexObject = int(texObject) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_ptexObject = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25330 * """ * cdef cyruntime.cudaTextureObject_t cytexObject * if texObject is None: # <<<<<<<<<<<<<< * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25332 * if texObject is None: * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): # <<<<<<<<<<<<<< * ptexObject = int(texObject) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_texObject, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25333 * ptexObject = 0 * elif 
isinstance(texObject, (cudaTextureObject_t,)): * ptexObject = int(texObject) # <<<<<<<<<<<<<< * else: * ptexObject = int(cudaTextureObject_t(texObject)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_texObject); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25333, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_ptexObject = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":25332 * if texObject is None: * ptexObject = 0 * elif isinstance(texObject, (cudaTextureObject_t,)): # <<<<<<<<<<<<<< * ptexObject = int(texObject) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25335 * ptexObject = int(texObject) * else: * ptexObject = int(cudaTextureObject_t(texObject)) # <<<<<<<<<<<<<< * cytexObject = ptexObject * cdef cudaResourceViewDesc pResViewDesc = cudaResourceViewDesc() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaTextureObject_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_texObject}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25335, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25335, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_ptexObject = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":25336 * else: * ptexObject = int(cudaTextureObject_t(texObject)) * cytexObject = ptexObject # <<<<<<<<<<<<<< * cdef cudaResourceViewDesc pResViewDesc = cudaResourceViewDesc() * with nogil: */ __pyx_t_6 = 
__Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_ptexObject); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25336, __pyx_L1_error) __pyx_v_cytexObject = ((cudaTextureObject_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":25337 * ptexObject = int(cudaTextureObject_t(texObject)) * cytexObject = ptexObject * cdef cudaResourceViewDesc pResViewDesc = cudaResourceViewDesc() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetTextureObjectResourceViewDesc(pResViewDesc._pvt_ptr, cytexObject) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaResourceViewDesc); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaResourceViewDesc); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25337, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_pResViewDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceViewDesc *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":25338 * cytexObject = ptexObject * cdef cudaResourceViewDesc pResViewDesc = cudaResourceViewDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetTextureObjectResourceViewDesc(pResViewDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25339 * cdef cudaResourceViewDesc pResViewDesc = cudaResourceViewDesc() * with nogil: * err = cyruntime.cudaGetTextureObjectResourceViewDesc(pResViewDesc._pvt_ptr, cytexObject) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ 
__pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetTextureObjectResourceViewDesc(((struct cudaResourceViewDesc *)__pyx_v_pResViewDesc->_pvt_ptr), __pyx_v_cytexObject); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25339, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":25338 * cytexObject = ptexObject * cdef cudaResourceViewDesc pResViewDesc = cudaResourceViewDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetTextureObjectResourceViewDesc(pResViewDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":25340 * with nogil: * err = cyruntime.cudaGetTextureObjectResourceViewDesc(pResViewDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pResViewDesc) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25341 * err = cyruntime.cudaGetTextureObjectResourceViewDesc(pResViewDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pResViewDesc) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25341, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25341, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25341, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 
= 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25341, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 25341, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 25341, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25340 * with nogil: * err = cyruntime.cudaGetTextureObjectResourceViewDesc(pResViewDesc._pvt_ptr, cytexObject) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pResViewDesc) */ } /* "cuda/bindings/runtime.pyx":25342 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pResViewDesc) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25342, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 25342, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pResViewDesc); __Pyx_GIVEREF((PyObject *)__pyx_v_pResViewDesc); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pResViewDesc)) != (0)) __PYX_ERR(0, 25342, __pyx_L1_error); __pyx_t_4 = 0; 
__pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25305 * return (_dict_cudaError_t[err], pTexDesc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetTextureObjectResourceViewDesc(texObject): * """ Returns a texture object's resource view descriptor. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetTextureObjectResourceViewDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_ptexObject); __Pyx_XDECREF((PyObject *)__pyx_v_pResViewDesc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Cython-generated binding code for cudaCreateSurfaceObject (cuda/bindings/runtime.pyx:25344). Do not hand-edit -- change the .pyx source and regenerate. */
 /* "cuda/bindings/runtime.pyx":25344 * return (_dict_cudaError_t[err], pResViewDesc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaCreateSurfaceObject(pResDesc : Optional[cudaResourceDesc]): * """ Creates a surface object. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_361cudaCreateSurfaceObject(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_360cudaCreateSurfaceObject, "cudaCreateSurfaceObject(cudaResourceDesc pResDesc: Optional[cudaResourceDesc])\n\nCreates a surface object.\n\nCreates a surface object and returns it in `pSurfObject`. `pResDesc`\ndescribes the data to perform surface load/stores on.\n:py:obj:`~.cudaResourceDesc.resType` must be\n:py:obj:`~.cudaResourceTypeArray` and\n:py:obj:`~.cudaResourceDesc`::res::array::array must be set to a valid\nCUDA array handle.\n\nSurface objects are only supported on devices of compute capability 3.0\nor higher. 
Additionally, a surface object is an opaque value, and, as\nsuch, should only be accessed through CUDA API calls.\n\nParameters\n----------\npResDesc : :py:obj:`~.cudaResourceDesc`\n Resource descriptor\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidChannelDescriptor`, :py:obj:`~.cudaErrorInvalidResourceHandle`\npSurfObject : :py:obj:`~.cudaSurfaceObject_t`\n Surface object to create\n\nSee Also\n--------\n:py:obj:`~.cudaDestroySurfaceObject`, :py:obj:`~.cuSurfObjectCreate`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_361cudaCreateSurfaceObject = {"cudaCreateSurfaceObject", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_361cudaCreateSurfaceObject, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_360cudaCreateSurfaceObject}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_361cudaCreateSurfaceObject(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *__pyx_v_pResDesc = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaCreateSurfaceObject (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_pResDesc,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25344, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25344, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaCreateSurfaceObject", 0) < (0)) __PYX_ERR(0, 25344, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaCreateSurfaceObject", 1, 1, 1, i); __PYX_ERR(0, 25344, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25344, __pyx_L3_error) } __pyx_v_pResDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *)values[0]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaCreateSurfaceObject", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25344, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaCreateSurfaceObject", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pResDesc), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaResourceDesc, 1, "pResDesc", 0))) __PYX_ERR(0, 25345, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_360cudaCreateSurfaceObject(__pyx_self, __pyx_v_pResDesc); /* function exit 
code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_360cudaCreateSurfaceObject(CYTHON_UNUSED PyObject *__pyx_self, struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *__pyx_v_pResDesc) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *__pyx_v_pSurfObject = 0; struct cudaResourceDesc *__pyx_v_cypResDesc_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; struct cudaResourceDesc *__pyx_t_5; int __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaCreateSurfaceObject", 0); /* "cuda/bindings/runtime.pyx":25375 * :py:obj:`~.cudaDestroySurfaceObject`, :py:obj:`~.cuSurfObjectCreate` * """ * cdef cudaSurfaceObject_t pSurfObject = cudaSurfaceObject_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._pvt_ptr if pResDesc is not None else NULL * with nogil: */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaSurfaceObject_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaSurfaceObject_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; 
/* NULL-check the cudaSurfaceObject_t() constructor call above before taking ownership of the new object. */
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25375, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pSurfObject = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaSurfaceObject_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":25376 * """ * cdef cudaSurfaceObject_t pSurfObject = cudaSurfaceObject_t() * cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._pvt_ptr if pResDesc is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaCreateSurfaceObject(pSurfObject._pvt_ptr, cypResDesc_ptr) */ __pyx_t_6 = (((PyObject *)__pyx_v_pResDesc) != Py_None); if (__pyx_t_6) { __pyx_t_5 = __pyx_v_pResDesc->_pvt_ptr; } else { __pyx_t_5 = NULL; } __pyx_v_cypResDesc_ptr = __pyx_t_5; /* "cuda/bindings/runtime.pyx":25377 * cdef cudaSurfaceObject_t pSurfObject = cudaSurfaceObject_t() * cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._pvt_ptr if pResDesc is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaCreateSurfaceObject(pSurfObject._pvt_ptr, cypResDesc_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25378 * cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc._pvt_ptr if pResDesc is not None else NULL * with nogil: * err = cyruntime.cudaCreateSurfaceObject(pSurfObject._pvt_ptr, cypResDesc_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaCreateSurfaceObject(((cudaSurfaceObject_t *)__pyx_v_pSurfObject->_pvt_ptr), __pyx_v_cypResDesc_ptr); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25378, __pyx_L4_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":25377 * cdef cudaSurfaceObject_t pSurfObject = cudaSurfaceObject_t() * cdef cyruntime.cudaResourceDesc* cypResDesc_ptr = pResDesc 
is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaCreateSurfaceObject(pSurfObject._pvt_ptr, cypResDesc_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":25379 * with nogil: * err = cyruntime.cudaCreateSurfaceObject(pSurfObject._pvt_ptr, cypResDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pSurfObject) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* "cuda/bindings/runtime.pyx":25380 * err = cyruntime.cudaCreateSurfaceObject(pSurfObject._pvt_ptr, cypResDesc_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pSurfObject) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25380, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 25380, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 25380, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* 
"cuda/bindings/runtime.pyx":25379 * with nogil: * err = cyruntime.cudaCreateSurfaceObject(pSurfObject._pvt_ptr, cypResDesc_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pSurfObject) */ } /* "cuda/bindings/runtime.pyx":25381 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pSurfObject) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25381, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 25381, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pSurfObject); __Pyx_GIVEREF((PyObject *)__pyx_v_pSurfObject); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pSurfObject)) != (0)) __PYX_ERR(0, 25381, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25344 * return (_dict_cudaError_t[err], pResViewDesc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaCreateSurfaceObject(pResDesc : Optional[cudaResourceDesc]): * """ Creates a surface object. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaCreateSurfaceObject", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pSurfObject); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25383 * return (_dict_cudaError_t[err], pSurfObject) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDestroySurfaceObject(surfObject): * """ Destroys a surface object. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_363cudaDestroySurfaceObject(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_362cudaDestroySurfaceObject, "cudaDestroySurfaceObject(surfObject)\n\nDestroys a surface object.\n\nDestroys the surface object specified by `surfObject`.\n\nParameters\n----------\nsurfObject : :py:obj:`~.cudaSurfaceObject_t`\n Surface object to destroy\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaCreateSurfaceObject`, :py:obj:`~.cuSurfObjectDestroy`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_363cudaDestroySurfaceObject = {"cudaDestroySurfaceObject", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_363cudaDestroySurfaceObject, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_362cudaDestroySurfaceObject}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_363cudaDestroySurfaceObject(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject 
/* NOTE(review): Cython-generated wrapper continuation and impl for cudaDestroySurfaceObject (runtime.pyx:25383); regenerate from the .pyx rather than editing here. */
*__pyx_v_surfObject = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDestroySurfaceObject (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_surfObject,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25383, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25383, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaDestroySurfaceObject", 0) < (0)) __PYX_ERR(0, 25383, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaDestroySurfaceObject", 1, 1, 1, i); __PYX_ERR(0, 25383, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25383, __pyx_L3_error) } __pyx_v_surfObject = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaDestroySurfaceObject", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25383, __pyx_L3_error) 
/* Successful argument unpacking resumes at __pyx_L6_skip; error paths above raised via __Pyx_RaiseArgtupleInvalid. */
__pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaDestroySurfaceObject", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_362cudaDestroySurfaceObject(__pyx_self, __pyx_v_surfObject); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_362cudaDestroySurfaceObject(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_surfObject) { cudaSurfaceObject_t __pyx_v_cysurfObject; PyObject *__pyx_v_psurfObject = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDestroySurfaceObject", 0); /* "cuda/bindings/runtime.pyx":25404 * """ * cdef cyruntime.cudaSurfaceObject_t cysurfObject * if surfObject is None: # <<<<<<<<<<<<<< * psurfObject = 0 * elif isinstance(surfObject, (cudaSurfaceObject_t,)): */ __pyx_t_1 = (__pyx_v_surfObject == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25405 * cdef cyruntime.cudaSurfaceObject_t cysurfObject * if surfObject is None: * psurfObject = 0 # <<<<<<<<<<<<<< * elif isinstance(surfObject, (cudaSurfaceObject_t,)): * psurfObject = int(surfObject) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_psurfObject = __pyx_mstate_global->__pyx_int_0; /* 
"cuda/bindings/runtime.pyx":25404 * """ * cdef cyruntime.cudaSurfaceObject_t cysurfObject * if surfObject is None: # <<<<<<<<<<<<<< * psurfObject = 0 * elif isinstance(surfObject, (cudaSurfaceObject_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25406 * if surfObject is None: * psurfObject = 0 * elif isinstance(surfObject, (cudaSurfaceObject_t,)): # <<<<<<<<<<<<<< * psurfObject = int(surfObject) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_surfObject, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaSurfaceObject_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25407 * psurfObject = 0 * elif isinstance(surfObject, (cudaSurfaceObject_t,)): * psurfObject = int(surfObject) # <<<<<<<<<<<<<< * else: * psurfObject = int(cudaSurfaceObject_t(surfObject)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_surfObject); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_psurfObject = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":25406 * if surfObject is None: * psurfObject = 0 * elif isinstance(surfObject, (cudaSurfaceObject_t,)): # <<<<<<<<<<<<<< * psurfObject = int(surfObject) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25409 * psurfObject = int(surfObject) * else: * psurfObject = int(cudaSurfaceObject_t(surfObject)) # <<<<<<<<<<<<<< * cysurfObject = psurfObject * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaSurfaceObject_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaSurfaceObject_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_surfObject}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25409, __pyx_L1_error) 
/* Fallback coercion path: psurfObject = int(cudaSurfaceObject_t(surfObject)), per the .pyx source quoted above. */
__Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25409, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_psurfObject = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":25410 * else: * psurfObject = int(cudaSurfaceObject_t(surfObject)) * cysurfObject = psurfObject # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDestroySurfaceObject(cysurfObject) */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_psurfObject); if (unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25410, __pyx_L1_error) __pyx_v_cysurfObject = ((cudaSurfaceObject_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":25411 * psurfObject = int(cudaSurfaceObject_t(surfObject)) * cysurfObject = psurfObject * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDestroySurfaceObject(cysurfObject) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25412 * cysurfObject = psurfObject * with nogil: * err = cyruntime.cudaDestroySurfaceObject(cysurfObject) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDestroySurfaceObject(__pyx_v_cysurfObject); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25412, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":25411 * psurfObject = int(cudaSurfaceObject_t(surfObject)) * cysurfObject = psurfObject * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDestroySurfaceObject(cysurfObject) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); 
Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":25413 * with nogil: * err = cyruntime.cudaDestroySurfaceObject(cysurfObject) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25413, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 25413, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25383 * return (_dict_cudaError_t[err], pSurfObject) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDestroySurfaceObject(surfObject): * """ Destroys a surface object. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDestroySurfaceObject", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_psurfObject); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25415 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGetSurfaceObjectResourceDesc(surfObject): * """ Returns a surface object's resource descriptor Returns the resource descriptor for the surface object specified by `surfObject`. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_365cudaGetSurfaceObjectResourceDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_364cudaGetSurfaceObjectResourceDesc, "cudaGetSurfaceObjectResourceDesc(surfObject)\n\nReturns a surface object's resource descriptor Returns the resource descriptor for the surface object specified by `surfObject`.\n\nParameters\n----------\nsurfObject : :py:obj:`~.cudaSurfaceObject_t`\n Surface object\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npResDesc : :py:obj:`~.cudaResourceDesc`\n Resource descriptor\n\nSee Also\n--------\n:py:obj:`~.cudaCreateSurfaceObject`, :py:obj:`~.cuSurfObjectGetResourceDesc`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_365cudaGetSurfaceObjectResourceDesc = {"cudaGetSurfaceObjectResourceDesc", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_365cudaGetSurfaceObjectResourceDesc, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_364cudaGetSurfaceObjectResourceDesc}; static PyObject 
/* NOTE(review): Cython-generated fastcall wrapper for cudaGetSurfaceObjectResourceDesc (runtime.pyx:25415); unpacks the single "surfObject" positional/keyword argument, then delegates to the generated impl function. Machine-generated -- regenerate from the .pyx instead of hand-editing. */
*__pyx_pw_4cuda_8bindings_7runtime_365cudaGetSurfaceObjectResourceDesc(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_surfObject = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGetSurfaceObjectResourceDesc (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_surfObject,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
/* keyword-argument count is zero when no kwds object was passed */
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25415, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25415, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGetSurfaceObjectResourceDesc", 0) < (0)) __PYX_ERR(0, 25415, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGetSurfaceObjectResourceDesc", 1, 1, 1, i); __PYX_ERR(0, 25415, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25415, __pyx_L3_error) } __pyx_v_surfObject = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGetSurfaceObjectResourceDesc", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25415, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetSurfaceObjectResourceDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_364cudaGetSurfaceObjectResourceDesc(__pyx_self, __pyx_v_surfObject); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static 
PyObject *__pyx_pf_4cuda_8bindings_7runtime_364cudaGetSurfaceObjectResourceDesc(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_surfObject) { cudaSurfaceObject_t __pyx_v_cysurfObject; PyObject *__pyx_v_psurfObject = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *__pyx_v_pResDesc = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; size_t __pyx_t_5; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_6; cudaError_t __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGetSurfaceObjectResourceDesc", 0); /* "cuda/bindings/runtime.pyx":25436 * """ * cdef cyruntime.cudaSurfaceObject_t cysurfObject * if surfObject is None: # <<<<<<<<<<<<<< * psurfObject = 0 * elif isinstance(surfObject, (cudaSurfaceObject_t,)): */ __pyx_t_1 = (__pyx_v_surfObject == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25437 * cdef cyruntime.cudaSurfaceObject_t cysurfObject * if surfObject is None: * psurfObject = 0 # <<<<<<<<<<<<<< * elif isinstance(surfObject, (cudaSurfaceObject_t,)): * psurfObject = int(surfObject) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_psurfObject = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25436 * """ * cdef cyruntime.cudaSurfaceObject_t cysurfObject * if surfObject is None: # <<<<<<<<<<<<<< * psurfObject = 0 * elif isinstance(surfObject, (cudaSurfaceObject_t,)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25438 * if surfObject is None: * psurfObject = 0 * elif isinstance(surfObject, (cudaSurfaceObject_t,)): # <<<<<<<<<<<<<< * psurfObject = int(surfObject) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_surfObject, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaSurfaceObject_t); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25439 * psurfObject = 0 * elif isinstance(surfObject, 
(cudaSurfaceObject_t,)): * psurfObject = int(surfObject) # <<<<<<<<<<<<<< * else: * psurfObject = int(cudaSurfaceObject_t(surfObject)) */ __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_v_surfObject); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25439, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_psurfObject = ((PyObject*)__pyx_t_2); __pyx_t_2 = 0; /* "cuda/bindings/runtime.pyx":25438 * if surfObject is None: * psurfObject = 0 * elif isinstance(surfObject, (cudaSurfaceObject_t,)): # <<<<<<<<<<<<<< * psurfObject = int(surfObject) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25441 * psurfObject = int(surfObject) * else: * psurfObject = int(cudaSurfaceObject_t(surfObject)) # <<<<<<<<<<<<<< * cysurfObject = psurfObject * cdef cudaResourceDesc pResDesc = cudaResourceDesc() */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaSurfaceObject_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaSurfaceObject_t); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_surfObject}; __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_5, (2-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25441, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_2); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25441, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_2); __pyx_t_2 = 0; __pyx_v_psurfObject = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":25442 * else: * psurfObject = int(cudaSurfaceObject_t(surfObject)) * cysurfObject = psurfObject # <<<<<<<<<<<<<< * cdef cudaResourceDesc pResDesc = cudaResourceDesc() * with nogil: */ __pyx_t_6 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_psurfObject); if 
(unlikely((__pyx_t_6 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25442, __pyx_L1_error) __pyx_v_cysurfObject = ((cudaSurfaceObject_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_6)); /* "cuda/bindings/runtime.pyx":25443 * psurfObject = int(cudaSurfaceObject_t(surfObject)) * cysurfObject = psurfObject * cdef cudaResourceDesc pResDesc = cudaResourceDesc() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGetSurfaceObjectResourceDesc(pResDesc._pvt_ptr, cysurfObject) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaResourceDesc); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaResourceDesc); __pyx_t_5 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_5, (1-__pyx_t_5) | (__pyx_t_5*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25443, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_4); } __pyx_v_pResDesc = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaResourceDesc *)__pyx_t_4); __pyx_t_4 = 0; /* "cuda/bindings/runtime.pyx":25444 * cysurfObject = psurfObject * cdef cudaResourceDesc pResDesc = cudaResourceDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetSurfaceObjectResourceDesc(pResDesc._pvt_ptr, cysurfObject) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25445 * cdef cudaResourceDesc pResDesc = cudaResourceDesc() * with nogil: * err = cyruntime.cudaGetSurfaceObjectResourceDesc(pResDesc._pvt_ptr, cysurfObject) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_7 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGetSurfaceObjectResourceDesc(((struct cudaResourceDesc 
*)__pyx_v_pResDesc->_pvt_ptr), __pyx_v_cysurfObject); if (unlikely(__pyx_t_7 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25445, __pyx_L5_error) __pyx_v_err = __pyx_t_7; } /* "cuda/bindings/runtime.pyx":25444 * cysurfObject = psurfObject * cdef cudaResourceDesc pResDesc = cudaResourceDesc() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGetSurfaceObjectResourceDesc(pResDesc._pvt_ptr, cysurfObject) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L6; } __pyx_L5_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L6:; } } /* "cuda/bindings/runtime.pyx":25446 * with nogil: * err = cyruntime.cudaGetSurfaceObjectResourceDesc(pResDesc._pvt_ptr, cysurfObject) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pResDesc) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25447 * err = cyruntime.cudaGetSurfaceObjectResourceDesc(pResDesc._pvt_ptr, cysurfObject) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pResDesc) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if 
(__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 25447, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 25447, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25446 * with nogil: * err = cyruntime.cudaGetSurfaceObjectResourceDesc(pResDesc._pvt_ptr, cysurfObject) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pResDesc) */ } /* "cuda/bindings/runtime.pyx":25448 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pResDesc) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25448, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 25448, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pResDesc); __Pyx_GIVEREF((PyObject *)__pyx_v_pResDesc); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pResDesc)) != (0)) __PYX_ERR(0, 25448, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25415 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def 
cudaGetSurfaceObjectResourceDesc(surfObject): * """ Returns a surface object's resource descriptor Returns the resource descriptor for the surface object specified by `surfObject`. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGetSurfaceObjectResourceDesc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_psurfObject); __Pyx_XDECREF((PyObject *)__pyx_v_pResDesc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25450 * return (_dict_cudaError_t[err], pResDesc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDriverGetVersion(): * """ Returns the latest version of CUDA supported by the driver. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_367cudaDriverGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_366cudaDriverGetVersion, "cudaDriverGetVersion()\n\nReturns the latest version of CUDA supported by the driver.\n\nReturns in `*driverVersion` the latest version of CUDA supported by the\ndriver. The version is returned as (1000 * major + 10 * minor). For\nexample, CUDA 9.2 would be represented by 9020. 
If no driver is\ninstalled, then 0 is returned as the driver version.\n\nThis function automatically returns :py:obj:`~.cudaErrorInvalidValue`\nif `driverVersion` is NULL.\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\ndriverVersion : int\n    Returns the CUDA driver version.\n\nSee Also\n--------\n:py:obj:`~.cudaRuntimeGetVersion`, :py:obj:`~.cuDriverGetVersion`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_367cudaDriverGetVersion = {"cudaDriverGetVersion", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_367cudaDriverGetVersion, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_366cudaDriverGetVersion}; /* NOTE(review): Cython-generated METH_NOARGS wrapper for cudaDriverGetVersion(); it forwards straight to the _366 implementation below. The `__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs)` call names identifiers that are NOT parameters of this function — this can only compile if the macro ignores (does not expand) its arguments; verify against the Cython utility code before touching. Generated from cuda/bindings/runtime.pyx; fix issues there, not here. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_367cudaDriverGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaDriverGetVersion (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_366cudaDriverGetVersion(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: calls cyruntime.cudaDriverGetVersion(&driverVersion) under nogil and returns the 2-tuple (_dict_cudaError_t[err], driverVersion), with None in place of the version on failure. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_366cudaDriverGetVersion(CYTHON_UNUSED PyObject *__pyx_self) { int __pyx_v_driverVersion; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaDriverGetVersion", 0); /* "cuda/bindings/runtime.pyx":25473 * :py:obj:`~.cudaRuntimeGetVersion`, :py:obj:`~.cuDriverGetVersion` * """ * cdef int driverVersion = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaDriverGetVersion(&driverVersion) */ __pyx_v_driverVersion = 0; /* "cuda/bindings/runtime.pyx":25474 * """ * cdef int driverVersion = 0 * with nogil: # 
<<<<<<<<<<<<<< * err = cyruntime.cudaDriverGetVersion(&driverVersion) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25475 * cdef int driverVersion = 0 * with nogil: * err = cyruntime.cudaDriverGetVersion(&driverVersion) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaDriverGetVersion((&__pyx_v_driverVersion)); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25475, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":25474 * """ * cdef int driverVersion = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaDriverGetVersion(&driverVersion) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":25476 * with nogil: * err = cyruntime.cudaDriverGetVersion(&driverVersion) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], driverVersion) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":25477 * err = cyruntime.cudaDriverGetVersion(&driverVersion) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], driverVersion) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25477, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25477, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = 
__Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25477, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25477, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 25477, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 25477, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25476 * with nogil: * err = cyruntime.cudaDriverGetVersion(&driverVersion) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], driverVersion) */ } /* "cuda/bindings/runtime.pyx":25478 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], driverVersion) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25478, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25478, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25478, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_int(__pyx_v_driverVersion); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25478, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25478, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if 
(__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 25478, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 25478, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25450 * return (_dict_cudaError_t[err], pResDesc) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaDriverGetVersion(): * """ Returns the latest version of CUDA supported by the driver. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaDriverGetVersion", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25480 * return (_dict_cudaError_t[err], driverVersion) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaRuntimeGetVersion(): * """ Returns the CUDA Runtime version. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_369cudaRuntimeGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_368cudaRuntimeGetVersion, "cudaRuntimeGetVersion()\n\nReturns the CUDA Runtime version.\n\nReturns in `*runtimeVersion` the version number of the current CUDA\nRuntime instance. The version is returned as (1000 * major + 10 *\nminor). For example, CUDA 9.2 would be represented by 9020.\n\nAs of CUDA 12.0, this function no longer initializes CUDA. 
The purpose\nof this API is solely to return a compile-time constant stating the\nCUDA Toolkit version in the above format.\n\nThis function automatically returns :py:obj:`~.cudaErrorInvalidValue`\nif the `runtimeVersion` argument is NULL.\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\nruntimeVersion : int\n    Returns the CUDA Runtime version.\n\nSee Also\n--------\n:py:obj:`~.cudaDriverGetVersion`, :py:obj:`~.cuDriverGetVersion`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_369cudaRuntimeGetVersion = {"cudaRuntimeGetVersion", (PyCFunction)__pyx_pw_4cuda_8bindings_7runtime_369cudaRuntimeGetVersion, METH_NOARGS, __pyx_doc_4cuda_8bindings_7runtime_368cudaRuntimeGetVersion}; /* NOTE(review): Cython-generated METH_NOARGS wrapper for cudaRuntimeGetVersion(); forwards to the _368 implementation below. As with the cudaDriverGetVersion wrapper, `__Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs)` names identifiers not declared in this function — compiles only if the macro ignores its arguments; verify the macro. Generated code: fix in cuda/bindings/runtime.pyx / the generator, not here. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_369cudaRuntimeGetVersion(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaRuntimeGetVersion (wrapper)", 0); __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_368cudaRuntimeGetVersion(__pyx_self); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation: calls cyruntime.cudaRuntimeGetVersion(&runtimeVersion) under nogil and returns (_dict_cudaError_t[err], runtimeVersion), with None as the second element on failure. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_368cudaRuntimeGetVersion(CYTHON_UNUSED PyObject *__pyx_self) { int __pyx_v_runtimeVersion; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations cudaError_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaRuntimeGetVersion", 0); /* "cuda/bindings/runtime.pyx":25506 * :py:obj:`~.cudaDriverGetVersion`, :py:obj:`~.cuDriverGetVersion` * """ * cdef int runtimeVersion = 0 # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaRuntimeGetVersion(&runtimeVersion) */ __pyx_v_runtimeVersion = 0; /* 
"cuda/bindings/runtime.pyx":25507 * """ * cdef int runtimeVersion = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaRuntimeGetVersion(&runtimeVersion) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25508 * cdef int runtimeVersion = 0 * with nogil: * err = cyruntime.cudaRuntimeGetVersion(&runtimeVersion) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_1 = __pyx_f_4cuda_8bindings_9cyruntime_cudaRuntimeGetVersion((&__pyx_v_runtimeVersion)); if (unlikely(__pyx_t_1 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25508, __pyx_L4_error) __pyx_v_err = __pyx_t_1; } /* "cuda/bindings/runtime.pyx":25507 * """ * cdef int runtimeVersion = 0 * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaRuntimeGetVersion(&runtimeVersion) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":25509 * with nogil: * err = cyruntime.cudaRuntimeGetVersion(&runtimeVersion) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], runtimeVersion) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":25510 * err = cyruntime.cudaRuntimeGetVersion(&runtimeVersion) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], runtimeVersion) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 25510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 25510, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 25510, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25509 * with nogil: * err = cyruntime.cudaRuntimeGetVersion(&runtimeVersion) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], runtimeVersion) */ } /* "cuda/bindings/runtime.pyx":25511 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], runtimeVersion) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_From_int(__pyx_v_runtimeVersion); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(0, 25511, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 25511, __pyx_L1_error); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5) != (0)) __PYX_ERR(0, 25511, __pyx_L1_error); __pyx_t_3 = 0; __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25480 * return (_dict_cudaError_t[err], driverVersion) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaRuntimeGetVersion(): * """ Returns the CUDA Runtime version. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaRuntimeGetVersion", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25513 * return (_dict_cudaError_t[err], runtimeVersion) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphCreate(unsigned int flags): * """ Creates a graph. 
*/ /* NOTE(review): Cython-generated FASTCALL wrapper + implementation for cudaGraphCreate(flags). Generated from cuda/bindings/runtime.pyx; fix issues in the generator/.pyx, not here. NOTE(review): in the keyword-parsing prologue, `if (unlikely(__pyx_kwds_len) < 0)` applies unlikely() before the comparison; if unlikely() is the usual `__builtin_expect(!!(x), 0)` the result is 0/1 and `< 0` can never be true — presumably `unlikely(__pyx_kwds_len < 0)` was intended. Verify the macro definition. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_371cudaGraphCreate(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_370cudaGraphCreate, "cudaGraphCreate(unsigned int flags)\n\nCreates a graph.\n\nCreates an empty graph, which is returned via `pGraph`.\n\nParameters\n----------\nflags : unsigned int\n    Graph creation flags, must be 0\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorMemoryAllocation`\npGraph : :py:obj:`~.cudaGraph_t`\n    Returns newly created graph\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphInstantiate`, :py:obj:`~.cudaGraphDestroy`, :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphClone`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_371cudaGraphCreate = {"cudaGraphCreate", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_371cudaGraphCreate, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_370cudaGraphCreate}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_371cudaGraphCreate(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { unsigned int __pyx_v_flags; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject 
*__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphCreate (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_flags_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25513, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25513, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphCreate", 0) < (0)) __PYX_ERR(0, 25513, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphCreate", 1, 1, 1, i); __PYX_ERR(0, 25513, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25513, __pyx_L3_error) } __pyx_v_flags = __Pyx_PyLong_As_unsigned_int(values[0]); if (unlikely((__pyx_v_flags == (unsigned int)-1) && PyErr_Occurred())) __PYX_ERR(0, 25514, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphCreate", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25513, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); 
++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphCreate", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_370cudaGraphCreate(__pyx_self, __pyx_v_flags); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* Implementation of cudaGraphCreate(flags): allocates a cudaGraph_t wrapper object, calls cyruntime.cudaGraphCreate under nogil to fill it, and returns (_dict_cudaError_t[err], pGraph) — with None as the second element on failure. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_370cudaGraphCreate(CYTHON_UNUSED PyObject *__pyx_self, unsigned int __pyx_v_flags) { struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *__pyx_v_pGraph = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; size_t __pyx_t_4; cudaError_t __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphCreate", 0); /* "cuda/bindings/runtime.pyx":25535 * :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphInstantiate`, :py:obj:`~.cudaGraphDestroy`, :py:obj:`~.cudaGraphGetNodes`, :py:obj:`~.cudaGraphGetRootNodes`, :py:obj:`~.cudaGraphGetEdges`, :py:obj:`~.cudaGraphClone` * """ * cdef cudaGraph_t pGraph = cudaGraph_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphCreate(pGraph._pvt_ptr, flags) */ __pyx_t_2 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_4 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_2, NULL}; __pyx_t_1 = 
__Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_4, (1-__pyx_t_4) | (__pyx_t_4*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25535, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pGraph = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":25536 * """ * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphCreate(pGraph._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25537 * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: * err = cyruntime.cudaGraphCreate(pGraph._pvt_ptr, flags) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_5 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphCreate(((cudaGraph_t *)__pyx_v_pGraph->__pyx_base._pvt_ptr), __pyx_v_flags); if (unlikely(__pyx_t_5 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25537, __pyx_L4_error) __pyx_v_err = __pyx_t_5; } /* "cuda/bindings/runtime.pyx":25536 * """ * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphCreate(pGraph._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L5; } __pyx_L4_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L5:; } } /* "cuda/bindings/runtime.pyx":25538 * with nogil: * err = cyruntime.cudaGraphCreate(pGraph._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraph) */ __pyx_t_6 = (__pyx_v_err != cudaSuccess); if (__pyx_t_6) { /* 
"cuda/bindings/runtime.pyx":25539 * err = cyruntime.cudaGraphCreate(pGraph._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraph) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25539, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25539, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25539, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25539, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_2); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2) != (0)) __PYX_ERR(0, 25539, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 25539, __pyx_L1_error); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25538 * with nogil: * err = cyruntime.cudaGraphCreate(pGraph._pvt_ptr, flags) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraph) */ } /* "cuda/bindings/runtime.pyx":25540 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraph) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25540, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 
25540, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25540, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25540, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 25540, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraph); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraph); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_pGraph)) != (0)) __PYX_ERR(0, 25540, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25513 * return (_dict_cudaError_t[err], runtimeVersion) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphCreate(unsigned int flags): * """ Creates a graph. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphCreate", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_pGraph); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25542 * return (_dict_cudaError_t[err], pGraph) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddKernelNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pNodeParams : Optional[cudaKernelNodeParams]): * """ Creates a kernel execution node and adds it to a graph. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_373cudaGraphAddKernelNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_372cudaGraphAddKernelNode, "cudaGraphAddKernelNode(graph, pDependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, cudaKernelNodeParams pNodeParams: Optional[cudaKernelNodeParams])\n\nCreates a kernel execution node and adds it to a graph.\n\nCreates a new kernel execution node and adds it to `graph` with\n`numDependencies` dependencies specified via `pDependencies` and\narguments specified in `pNodeParams`. It is possible for\n`numDependencies` to be 0, in which case the node will be placed at the\nroot of the graph. `pDependencies` may not have any duplicate entries.\nA handle to the new node will be returned in `pGraphNode`.\n\nThe :py:obj:`~.cudaKernelNodeParams` structure is defined as:\n\n**View CUDA Toolkit Documentation for a C++ code example**\n\nWhen the graph is launched, the node will invoke kernel `func` on a\n(`gridDim.x` x `gridDim.y` x `gridDim.z`) grid of blocks. Each block\ncontains (`blockDim.x` x `blockDim.y` x `blockDim.z`) threads.\n\n`sharedMem` sets the amount of dynamic shared memory that will be\navailable to each thread block.\n\nKernel parameters to `func` can be specified in one of two ways:\n\n1) Kernel parameters can be specified via `kernelParams`. If the kernel\nhas N parameters, then `kernelParams` needs to be an array of N\npointers. Each pointer, from `kernelParams`[0] to `kernelParams`[N-1],\npoints to the region of memory from which the actual parameter will be\ncopied. 
The number of kernel parameters and their offsets and sizes do\nnot need to be specified as that information is retrieved directly from\nthe kernel's image.\n\n2) Kernel parameters can also be packaged by the application into a\nsingle buffer that is passed in via `extra`. This places the burden on\nthe application of knowing each kernel parameter's size and\nalignment/padding within the buffer. The `extra` parameter exists to\nallow this function to take additional less commonly used arguments.\n`extra` specifies a list of names of extra settings and"" their\ncorresponding values. Each extra setting name is immediately followed\nby the corresponding value. The list must be terminated with either\nNULL or CU_LAUNCH_PARAM_END.\n\n- :py:obj:`~.CU_LAUNCH_PARAM_END`, which indicates the end of the\n `extra` array;\n\n- :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER`, which specifies that the\n next value in `extra` will be a pointer to a buffer containing all\n the kernel parameters for launching kernel `func`;\n\n- :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_SIZE`, which specifies that the\n next value in `extra` will be a pointer to a size_t containing the\n size of the buffer specified with\n :py:obj:`~.CU_LAUNCH_PARAM_BUFFER_POINTER`;\n\nThe error :py:obj:`~.cudaErrorInvalidValue` will be returned if kernel\nparameters are specified with both `kernelParams` and `extra` (i.e.\nboth `kernelParams` and `extra` are non-NULL).\n\nThe `kernelParams` or `extra` array, as well as the argument values it\npoints to, are copied during this call.\n\nParameters\n----------\ngraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n Graph to which to add the node\npDependencies : list[:py:obj:`~.cudaGraphNode_t`]\n Dependencies of the node\nnumDependencies : size_t\n Number of dependencies\npNodeParams : :py:obj:`~.cudaKernelNodeParams`\n Parameters for the GPU execution node\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, 
:py:obj:`~.cudaErrorInvalidDeviceFunction`\npGraphNode : :py:obj:`~.cudaGraphNode_t`\n Returns newly created node\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaLaunchKernel`, :py:obj:`~.cudaGraphKernelNodeGetParams`, :py:obj:`~.cudaGraphKernelNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode`\n\nNotes\n-----\nKernels launched usin""g graphs must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects."); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_373cudaGraphAddKernelNode = {"cudaGraphAddKernelNode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_373cudaGraphAddKernelNode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_372cudaGraphAddKernelNode}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_373cudaGraphAddKernelNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_graph = 0; PyObject *__pyx_v_pDependencies = 0; size_t __pyx_v_numDependencies; struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeParams *__pyx_v_pNodeParams = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphAddKernelNode (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); 
#else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_graph_2,&__pyx_mstate_global->__pyx_n_u_pDependencies,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_pNodeParams,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25542, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 25542, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25542, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25542, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25542, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphAddKernelNode", 0) < (0)) __PYX_ERR(0, 25542, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphAddKernelNode", 1, 4, 4, i); __PYX_ERR(0, 25542, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25542, __pyx_L3_error) values[1] = 
__Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25542, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25542, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 25542, __pyx_L3_error) } __pyx_v_graph = values[0]; __pyx_v_pDependencies = values[1]; __pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 25543, __pyx_L3_error) __pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeParams *)values[3]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphAddKernelNode", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 25542, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddKernelNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pNodeParams), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaKernelNodeParams, 1, "pNodeParams", 0))) __PYX_ERR(0, 25543, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_372cudaGraphAddKernelNode(__pyx_self, __pyx_v_graph, __pyx_v_pDependencies, __pyx_v_numDependencies, __pyx_v_pNodeParams); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
  Py_XDECREF(values[__pyx_temp]);
}
__pyx_L7_cleaned_up:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Forward declaration of the generator body that evaluates the
 * `all(isinstance(_x, (cudaGraphNode_t, driver.CUgraphNode)) for _x in pDependencies)`
 * genexpr from runtime.pyx:25630.  NOTE(review): Cython-generated code —
 * hand edits are overwritten on regeneration. */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddKernelNode_2generator94(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
/* "cuda/bindings/runtime.pyx":25630
 * """
 * pDependencies = [] if pDependencies is None else pDependencies
 * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<<
 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]")
 * cdef cyruntime.cudaGraph_t cygraph
 */
/* Factory: allocates the closure scope holding the iterable argument and
 * returns a new generator object wired to generator94 below. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaGraphAddKernelNode_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) {
  struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_94_genexpr *__pyx_cur_scope;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("genexpr", 0);
  __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_94_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_94_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_94_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL);
  if (unlikely(!__pyx_cur_scope)) {
    /* Allocation failed: park Py_None in the scope pointer so the error
     * path's DECREF stays balanced, then jump to error handling. */
    __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_94_genexpr *)Py_None);
    __Pyx_INCREF(Py_None);
    __PYX_ERR(0, 25630, __pyx_L1_error)
  } else {
    __Pyx_GOTREF((PyObject *)__pyx_cur_scope);
  }
  /* Stash the iterable (pDependencies) in the closure for the generator. */
  __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0;
  __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0);
  __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0);
  {
    __pyx_CoroutineObject *gen =
__Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddKernelNode_2generator94, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[94]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaGraphAddKernelNode_locals_ge, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime);
    if (unlikely(!gen)) __PYX_ERR(0, 25630, __pyx_L1_error)
    __Pyx_DECREF(__pyx_cur_scope);
    __Pyx_RefNannyFinishContext();
    return (PyObject *) gen;
  }
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddKernelNode.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_DECREF((PyObject *)__pyx_cur_scope);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Generator body: inlined evaluation of the all(...) genexpr.  Returns
 * Py_False as soon as one element fails both isinstance checks, Py_True
 * when the iterable is exhausted.  Runs in a single resume (resume_label 0);
 * any re-resume falls to the default case and errors. */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddKernelNode_2generator94(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */
{
  struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_94_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_94_genexpr *)__pyx_generator->closure);
  PyObject *__pyx_r = NULL;
  PyObject *__pyx_t_1 = NULL;
  Py_ssize_t __pyx_t_2;
  PyObject *(*__pyx_t_3)(PyObject *);
  PyObject *__pyx_t_4 = NULL;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("genexpr", 0);
  switch (__pyx_generator->resume_label) {
    case 0: goto __pyx_L3_first_run;
    default: /* CPython raises the right error here */
    __Pyx_RefNannyFinishContext();
    return NULL;
  }
  __pyx_L3_first_run:;
  if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 25630, __pyx_L1_error)
  if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 25630, __pyx_L1_error) }
  /* Fast path: iterate list/tuple by index; otherwise fall back to the
   * generic iterator protocol (__pyx_t_3 holds the next() function). */
  if
(likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) {
    __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1);
    __pyx_t_2 = 0;
    __pyx_t_3 = NULL;
  } else {
    __pyx_t_2 = -1;
    __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25630, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25630, __pyx_L1_error)
  }
  for (;;) {
    if (likely(!__pyx_t_3)) {
      /* Indexed iteration over an exact list or tuple. */
      if (likely(PyList_CheckExact(__pyx_t_1))) {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 25630, __pyx_L1_error)
          #endif
          if (__pyx_t_2 >= __pyx_temp) break;
        }
        __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2);
        ++__pyx_t_2;
      } else {
        {
          Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
          #if !CYTHON_ASSUME_SAFE_SIZE
          if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 25630, __pyx_L1_error)
          #endif
          if (__pyx_t_2 >= __pyx_temp) break;
        }
        #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
        __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2));
        #else
        __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2);
        #endif
        ++__pyx_t_2;
      }
      if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25630, __pyx_L1_error)
    } else {
      /* Generic iterator: NULL with StopIteration pending means exhausted. */
      __pyx_t_4 = __pyx_t_3(__pyx_t_1);
      if (unlikely(!__pyx_t_4)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 25630, __pyx_L1_error)
          PyErr_Clear();
        }
        break;
      }
    }
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x);
    __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    __pyx_t_4 = 0;
    /* Short-circuit isinstance(_x, (cudaGraphNode_t, driver.CUgraphNode)). */
    __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t);
if (!__pyx_t_6) {
    } else {
      __pyx_t_5 = __pyx_t_6;
      goto __pyx_L7_bool_binop_done;
    }
    __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode);
    __pyx_t_5 = __pyx_t_6;
    __pyx_L7_bool_binop_done:;
    __pyx_t_6 = (!__pyx_t_5);
    if (__pyx_t_6) {
      /* One element failed the type check: all(...) is False. */
      __Pyx_XDECREF(__pyx_r);
      __Pyx_INCREF(Py_False);
      __pyx_r = Py_False;
      __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
      goto __pyx_L0;
    }
  }
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /*else*/ {
    /* Iterable exhausted without failures: all(...) is True. */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_True);
    __pyx_r = Py_True;
    goto __pyx_L0;
  }
  CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_4);
  if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); }
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  #if !CYTHON_USE_EXC_INFO_STACK
  __Pyx_Coroutine_ResetAndClearException(__pyx_generator);
  #endif
  __pyx_generator->resume_label = -1;
  __Pyx_Coroutine_clear((PyObject*)__pyx_generator);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":25542
 * return (_dict_cudaError_t[err], pGraph)
 *
 * @cython.embedsignature(True) # <<<<<<<<<<<<<<
 * def cudaGraphAddKernelNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pNodeParams : Optional[cudaKernelNodeParams]):
 * """ Creates a kernel execution node and adds it to a graph. 
*/ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_372cudaGraphAddKernelNode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_graph, PyObject *__pyx_v_pDependencies, size_t __pyx_v_numDependencies, struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeParams *__pyx_v_pNodeParams) { cudaGraph_t __pyx_v_cygraph; PyObject *__pyx_v_pgraph = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *__pyx_v_pGraphNode = 0; cudaGraphNode_t *__pyx_v_cypDependencies; Py_ssize_t __pyx_v_idx; struct cudaKernelNodeParams *__pyx_v_cypNodeParams_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddKernelNode_2generator94 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaKernelNodeParams *__pyx_t_14; cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphAddKernelNode", 0); __Pyx_INCREF(__pyx_v_pDependencies); /* "cuda/bindings/runtime.pyx":25629 * Kernels launched using graphs must not use texture and surface references. Reading or writing through any texture or surface reference is undefined behavior. This restriction does not apply to texture and surface objects. 
* """ * pDependencies = [] if pDependencies is None else pDependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_2 = (__pyx_v_pDependencies == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_pDependencies); __pyx_t_1 = __pyx_v_pDependencies; } __Pyx_DECREF_SET(__pyx_v_pDependencies, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":25630 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_22cudaGraphAddKernelNode_genexpr(NULL, __pyx_v_pDependencies); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25630, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25630, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 25630, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":25631 * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 
'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraph_t cygraph * if graph is None: */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_pDependencies_is_not_in}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25631, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 25631, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":25630 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ } /* "cuda/bindings/runtime.pyx":25633 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_4 = (__pyx_v_graph == Py_None); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25634 * cdef cyruntime.cudaGraph_t cygraph * if graph is None: * pgraph = 0 # <<<<<<<<<<<<<< * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); 
__pyx_v_pgraph = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25633 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":25635 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_4 = __pyx_t_2; __pyx_L5_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25636 * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) # <<<<<<<<<<<<<< * else: * pgraph = int(cudaGraph_t(graph)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_graph); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25636, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pgraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":25635 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":25638 * pgraph = int(graph) * else: * pgraph = int(cudaGraph_t(graph)) # <<<<<<<<<<<<<< * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { 
PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_graph}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25638, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_1 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25638, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pgraph = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":25639 * else: * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph # <<<<<<<<<<<<<< * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pgraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25639, __pyx_L1_error) __pyx_v_cygraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":25640 * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25640, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); 
} __pyx_v_pGraphNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":25641 * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL # <<<<<<<<<<<<<< * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cypDependencies = NULL; /* "cuda/bindings/runtime.pyx":25642 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25642, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25643 * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25643, __pyx_L1_error) __pyx_v_cypDependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":25644 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_4 = (__pyx_v_cypDependencies == NULL); if (unlikely(__pyx_t_4)) { /* 
"cuda/bindings/runtime.pyx":25645 * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(pDependencies)): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25645, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 25645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_1 
= __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25645, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 25645, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":25644 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":25647 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(pDependencies)): # <<<<<<<<<<<<<< * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25647, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":25648 * else: * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25648, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (__pyx_v_cypDependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr[0])); 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } /* "cuda/bindings/runtime.pyx":25642 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":25649 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25649, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 == 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25650 * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25650, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cypDependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":25649 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if 
numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ } __pyx_L7:; /* "cuda/bindings/runtime.pyx":25651 * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) # <<<<<<<<<<<<<< * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25651, __pyx_L1_error) __pyx_t_4 = (__pyx_v_numDependencies > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_4)) { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_9 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25651, __pyx_L1_error) __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 25651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 25651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_10)) 
__PYX_ERR(0, 25651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25651, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 25651, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":25652 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphAddKernelNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) */ __pyx_t_4 = (((PyObject *)__pyx_v_pNodeParams) != Py_None); if (__pyx_t_4) { __pyx_t_14 = __pyx_v_pNodeParams->_pvt_ptr; } else { __pyx_t_14 = NULL; } __pyx_v_cypNodeParams_ptr = __pyx_t_14; /* "cuda/bindings/runtime.pyx":25653 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddKernelNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, 
cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25654 * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: * err = cyruntime.cudaGraphAddKernelNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) # <<<<<<<<<<<<<< * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphAddKernelNode(((cudaGraphNode_t *)__pyx_v_pGraphNode->__pyx_base._pvt_ptr), __pyx_v_cygraph, __pyx_v_cypDependencies, __pyx_v_numDependencies, __pyx_v_cypNodeParams_ptr); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25654, __pyx_L13_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":25653 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddKernelNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L14; } __pyx_L13_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L14:; } } /* "cuda/bindings/runtime.pyx":25655 * with nogil: * err = cyruntime.cudaGraphAddKernelNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ 
__pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25655, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L16_bool_binop_done; } __pyx_t_2 = (__pyx_v_cypDependencies != NULL); __pyx_t_4 = __pyx_t_2; __pyx_L16_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25656 * err = cyruntime.cudaGraphAddKernelNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cypDependencies); /* "cuda/bindings/runtime.pyx":25655 * with nogil: * err = cyruntime.cudaGraphAddKernelNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":25657 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25658 * free(cypDependencies) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraphNode) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_9); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 25658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 25658, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None) != (0)) __PYX_ERR(0, 25658, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25657 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ } /* "cuda/bindings/runtime.pyx":25659 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25659, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25659, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25659, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25659, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 25659, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraphNode); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraphNode); if 
(__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_pGraphNode)) != (0)) __PYX_ERR(0, 25659, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25542 * return (_dict_cudaError_t[err], pGraph) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddKernelNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pNodeParams : Optional[cudaKernelNodeParams]): * """ Creates a kernel execution node and adds it to a graph. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddKernelNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pgraph); __Pyx_XDECREF((PyObject *)__pyx_v_pGraphNode); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddKernelNode_2generator94); __Pyx_XDECREF(__pyx_v_pDependencies); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25661 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeGetParams(node): * """ Returns a kernel node's parameters. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_375cudaGraphKernelNodeGetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_374cudaGraphKernelNodeGetParams, "cudaGraphKernelNodeGetParams(node)\n\nReturns a kernel node's parameters.\n\nReturns the parameters of kernel node `node` in `pNodeParams`. 
The\n`kernelParams` or `extra` array returned in `pNodeParams`, as well as\nthe argument values it points to, are owned by the node. This memory\nremains valid until the node is destroyed or its parameters are\nmodified, and should not be modified directly. Use\n:py:obj:`~.cudaGraphKernelNodeSetParams` to update the parameters of\nthis node.\n\nThe params will contain either `kernelParams` or `extra`, according to\nwhich of these was most recently set on the node.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to get the parameters for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDeviceFunction`\npNodeParams : :py:obj:`~.cudaKernelNodeParams`\n Pointer to return the parameters\n\nSee Also\n--------\n:py:obj:`~.cudaLaunchKernel`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphKernelNodeSetParams`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_375cudaGraphKernelNodeGetParams = {"cudaGraphKernelNodeGetParams", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_375cudaGraphKernelNodeGetParams, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_374cudaGraphKernelNodeGetParams}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_375cudaGraphKernelNodeGetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_node = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphKernelNodeGetParams (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE 
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25661, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25661, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphKernelNodeGetParams", 0) < (0)) __PYX_ERR(0, 25661, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeGetParams", 1, 1, 1, i); __PYX_ERR(0, 25661, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25661, __pyx_L3_error) } __pyx_v_node = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeGetParams", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 25661, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeGetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_374cudaGraphKernelNodeGetParams(__pyx_self, __pyx_v_node); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_374cudaGraphKernelNodeGetParams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node) { cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pnode = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeParams *__pyx_v_pNodeParams = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphKernelNodeGetParams", 0); /* "cuda/bindings/runtime.pyx":25693 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_node == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25694 * cdef cyruntime.cudaGraphNode_t cynode * if node is None: * pnode = 0 # <<<<<<<<<<<<<< * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pnode = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25693 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25695 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ __pyx_t_2 
= __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25696 * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) # <<<<<<<<<<<<<< * else: * pnode = int(cudaGraphNode_t(node)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pnode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":25695 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25698 * pnode = int(node) * else: * pnode = int(cudaGraphNode_t(node)) # <<<<<<<<<<<<<< * cynode = pnode * cdef cudaKernelNodeParams pNodeParams = cudaKernelNodeParams() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25698, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pnode = 
((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":25699 * else: * pnode = int(cudaGraphNode_t(node)) * cynode = pnode # <<<<<<<<<<<<<< * cdef cudaKernelNodeParams pNodeParams = cudaKernelNodeParams() * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25699, __pyx_L1_error) __pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":25700 * pnode = int(cudaGraphNode_t(node)) * cynode = pnode * cdef cudaKernelNodeParams pNodeParams = cudaKernelNodeParams() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphKernelNodeGetParams(cynode, pNodeParams._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaKernelNodeParams); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaKernelNodeParams); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25700, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeParams *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":25701 * cynode = pnode * cdef cudaKernelNodeParams pNodeParams = cudaKernelNodeParams() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25702 * cdef cudaKernelNodeParams pNodeParams = cudaKernelNodeParams() * with nogil: * 
err = cyruntime.cudaGraphKernelNodeGetParams(cynode, pNodeParams._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphKernelNodeGetParams(__pyx_v_cynode, ((struct cudaKernelNodeParams *)__pyx_v_pNodeParams->_pvt_ptr)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25702, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":25701 * cynode = pnode * cdef cudaKernelNodeParams pNodeParams = cudaKernelNodeParams() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":25703 * with nogil: * err = cyruntime.cudaGraphKernelNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25704 * err = cyruntime.cudaGraphKernelNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pNodeParams) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25704, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25704, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25704, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25704, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 25704, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 25704, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25703 * with nogil: * err = cyruntime.cudaGraphKernelNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) */ } /* "cuda/bindings/runtime.pyx":25705 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25705, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25705, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25705, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25705, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 25705, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pNodeParams); __Pyx_GIVEREF((PyObject *)__pyx_v_pNodeParams); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, 
((PyObject *)__pyx_v_pNodeParams)) != (0)) __PYX_ERR(0, 25705, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25661 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeGetParams(node): * """ Returns a kernel node's parameters. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeGetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XDECREF((PyObject *)__pyx_v_pNodeParams); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25707 * return (_dict_cudaError_t[err], pNodeParams) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeSetParams(node, pNodeParams : Optional[cudaKernelNodeParams]): * """ Sets a kernel node's parameters. 
*/ /* Python wrapper */ /* NOTE(review): Cython-generated binding for runtime.pyx:25707 `cudaGraphKernelNodeSetParams`. Do not hand-edit this C; fix the .pyx source and regenerate. The wrapper parses (node, pNodeParams), type-checks pNodeParams against cudaKernelNodeParams (None allowed), then dispatches to the impl below. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_377cudaGraphKernelNodeSetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_376cudaGraphKernelNodeSetParams, "cudaGraphKernelNodeSetParams(node, cudaKernelNodeParams pNodeParams: Optional[cudaKernelNodeParams])\n\nSets a kernel node's parameters.\n\nSets the parameters of kernel node `node` to `pNodeParams`.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n    Node to set the parameters for\npNodeParams : :py:obj:`~.cudaKernelNodeParams`\n    Parameters to copy\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`, :py:obj:`~.cudaErrorMemoryAllocation`\n\nSee Also\n--------\n:py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaLaunchKernel`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphKernelNodeGetParams`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_377cudaGraphKernelNodeSetParams = {"cudaGraphKernelNodeSetParams", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_377cudaGraphKernelNodeSetParams, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_376cudaGraphKernelNodeSetParams}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_377cudaGraphKernelNodeSetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_node = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeParams *__pyx_v_pNodeParams = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = 
{0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphKernelNodeSetParams (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,&__pyx_mstate_global->__pyx_n_u_pNodeParams,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25707, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25707, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25707, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphKernelNodeSetParams", 0) < (0)) __PYX_ERR(0, 25707, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeSetParams", 1, 2, 2, i); __PYX_ERR(0, 25707, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25707, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25707, __pyx_L3_error) 
} __pyx_v_node = values[0]; __pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeParams *)values[1]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeSetParams", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25707, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeSetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pNodeParams), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaKernelNodeParams, 1, "pNodeParams", 0))) __PYX_ERR(0, 25708, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_376cudaGraphKernelNodeSetParams(__pyx_self, __pyx_v_node, __pyx_v_pNodeParams); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* NOTE(review): impl — accepts node as None/0, cudaGraphNode_t, driver.CUgraphNode, or anything int()-convertible; converts it through a Python int to a cudaGraphNode_t handle, takes pNodeParams->_pvt_ptr (NULL when pNodeParams is None), calls the C runtime with the GIL released, and returns a 1-tuple (cudaError_t enum member,). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_376cudaGraphKernelNodeSetParams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node, struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeParams *__pyx_v_pNodeParams) { cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pnode = NULL; struct cudaKernelNodeParams *__pyx_v_cypNodeParams_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject 
*__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; struct cudaKernelNodeParams *__pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphKernelNodeSetParams", 0); /* "cuda/bindings/runtime.pyx":25730 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_node == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25731 * cdef cyruntime.cudaGraphNode_t cynode * if node is None: * pnode = 0 # <<<<<<<<<<<<<< * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pnode = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25730 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25732 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25733 * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) # <<<<<<<<<<<<<< * else: * pnode = int(cudaGraphNode_t(node)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25733, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__pyx_v_pnode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":25732 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25735 * pnode = int(node) * else: * pnode = int(cudaGraphNode_t(node)) # <<<<<<<<<<<<<< * cynode = pnode * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25735, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25735, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pnode = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":25736 * else: * pnode = int(cudaGraphNode_t(node)) * cynode = pnode # <<<<<<<<<<<<<< * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25736, __pyx_L1_error) __pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":25737 * pnode = int(cudaGraphNode_t(node)) * 
cynode = pnode * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphKernelNodeSetParams(cynode, cypNodeParams_ptr) */ __pyx_t_1 = (((PyObject *)__pyx_v_pNodeParams) != Py_None); if (__pyx_t_1) { __pyx_t_8 = __pyx_v_pNodeParams->_pvt_ptr; } else { __pyx_t_8 = NULL; } __pyx_v_cypNodeParams_ptr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":25738 * cynode = pnode * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeSetParams(cynode, cypNodeParams_ptr) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25739 * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: * err = cyruntime.cudaGraphKernelNodeSetParams(cynode, cypNodeParams_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphKernelNodeSetParams(__pyx_v_cynode, __pyx_v_cypNodeParams_ptr); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25739, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":25738 * cynode = pnode * cdef cyruntime.cudaKernelNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeSetParams(cynode, cypNodeParams_ptr) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":25740 * with nogil: * err = 
cyruntime.cudaGraphKernelNodeSetParams(cynode, cypNodeParams_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25740, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25740, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25740, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25740, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 25740, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25707 * return (_dict_cudaError_t[err], pNodeParams) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeSetParams(node, pNodeParams : Optional[cudaKernelNodeParams]): * """ Sets a kernel node's parameters. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeSetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25742 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeCopyAttributes(hSrc, hDst): * """ Copies attributes from source node to destination node. 
*/ /* Python wrapper */ /* NOTE(review): Cython-generated binding for runtime.pyx:25742 `cudaGraphKernelNodeCopyAttributes`. Do not hand-edit this C; fix the .pyx source and regenerate. The wrapper parses (hSrc, hDst) and dispatches to the impl below. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_379cudaGraphKernelNodeCopyAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_378cudaGraphKernelNodeCopyAttributes, "cudaGraphKernelNodeCopyAttributes(hSrc, hDst)\n\nCopies attributes from source node to destination node.\n\nCopies attributes from source node `src` to destination node `dst`.\nBoth node must have the same context.\n\nParameters\n----------\ndst : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n    Destination node\nsrc : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n    Source node For list of attributes see\n    :py:obj:`~.cudaKernelNodeAttrID`\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidContext`\n\nSee Also\n--------\n:py:obj:`~.cudaAccessPolicyWindow`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_379cudaGraphKernelNodeCopyAttributes = {"cudaGraphKernelNodeCopyAttributes", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_379cudaGraphKernelNodeCopyAttributes, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_378cudaGraphKernelNodeCopyAttributes}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_379cudaGraphKernelNodeCopyAttributes(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_hSrc = 0; PyObject *__pyx_v_hDst = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("cudaGraphKernelNodeCopyAttributes (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hSrc,&__pyx_mstate_global->__pyx_n_u_hDst,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25742, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25742, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25742, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphKernelNodeCopyAttributes", 0) < (0)) __PYX_ERR(0, 25742, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeCopyAttributes", 1, 2, 2, i); __PYX_ERR(0, 25742, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25742, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25742, __pyx_L3_error) } __pyx_v_hSrc = values[0]; __pyx_v_hDst = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; 
__Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeCopyAttributes", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 25742, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeCopyAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_378cudaGraphKernelNodeCopyAttributes(__pyx_self, __pyx_v_hSrc, __pyx_v_hDst); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* NOTE(review): impl — converts hDst then hSrc (each None/0, typed handle, or int()-convertible) to cudaGraphNode_t handles, calls the C runtime with the GIL released, and returns a 1-tuple (cudaError_t enum member,). */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_378cudaGraphKernelNodeCopyAttributes(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hSrc, PyObject *__pyx_v_hDst) { cudaGraphNode_t __pyx_v_cyhDst; PyObject *__pyx_v_phDst = NULL; cudaGraphNode_t __pyx_v_cyhSrc; PyObject *__pyx_v_phSrc = NULL; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphKernelNodeCopyAttributes", 0); /* "cuda/bindings/runtime.pyx":25767 * """ * cdef cyruntime.cudaGraphNode_t cyhDst * if hDst is None: # <<<<<<<<<<<<<< * phDst = 0 * elif isinstance(hDst, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_hDst == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25768 * cdef cyruntime.cudaGraphNode_t cyhDst * if hDst is None: * phDst = 0 # <<<<<<<<<<<<<< * elif 
isinstance(hDst, (cudaGraphNode_t,driver.CUgraphNode)): * phDst = int(hDst) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phDst = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25767 * """ * cdef cyruntime.cudaGraphNode_t cyhDst * if hDst is None: # <<<<<<<<<<<<<< * phDst = 0 * elif isinstance(hDst, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25769 * if hDst is None: * phDst = 0 * elif isinstance(hDst, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * phDst = int(hDst) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hDst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hDst, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25770 * phDst = 0 * elif isinstance(hDst, (cudaGraphNode_t,driver.CUgraphNode)): * phDst = int(hDst) # <<<<<<<<<<<<<< * else: * phDst = int(cudaGraphNode_t(hDst)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hDst); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25770, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phDst = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":25769 * if hDst is None: * phDst = 0 * elif isinstance(hDst, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * phDst = int(hDst) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25772 * phDst = int(hDst) * else: * phDst = int(cudaGraphNode_t(hDst)) # <<<<<<<<<<<<<< * cyhDst = phDst * cdef cyruntime.cudaGraphNode_t cyhSrc */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = 
{__pyx_t_4, __pyx_v_hDst}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25772, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25772, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phDst = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":25773 * else: * phDst = int(cudaGraphNode_t(hDst)) * cyhDst = phDst # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t cyhSrc * if hSrc is None: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phDst); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25773, __pyx_L1_error) __pyx_v_cyhDst = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":25775 * cyhDst = phDst * cdef cyruntime.cudaGraphNode_t cyhSrc * if hSrc is None: # <<<<<<<<<<<<<< * phSrc = 0 * elif isinstance(hSrc, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_hSrc == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25776 * cdef cyruntime.cudaGraphNode_t cyhSrc * if hSrc is None: * phSrc = 0 # <<<<<<<<<<<<<< * elif isinstance(hSrc, (cudaGraphNode_t,driver.CUgraphNode)): * phSrc = int(hSrc) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phSrc = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25775 * cyhDst = phDst * cdef cyruntime.cudaGraphNode_t cyhSrc * if hSrc is None: # <<<<<<<<<<<<<< * phSrc = 0 * elif isinstance(hSrc, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":25777 * if hSrc is None: * phSrc = 0 * elif isinstance(hSrc, (cudaGraphNode_t,driver.CUgraphNode)): # 
<<<<<<<<<<<<<< * phSrc = int(hSrc) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hSrc, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hSrc, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25778 * phSrc = 0 * elif isinstance(hSrc, (cudaGraphNode_t,driver.CUgraphNode)): * phSrc = int(hSrc) # <<<<<<<<<<<<<< * else: * phSrc = int(cudaGraphNode_t(hSrc)) */ __pyx_t_5 = __Pyx_PyNumber_Int(__pyx_v_hSrc); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25778, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_phSrc = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":25777 * if hSrc is None: * phSrc = 0 * elif isinstance(hSrc, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * phSrc = int(hSrc) * else: */ goto __pyx_L6; } /* "cuda/bindings/runtime.pyx":25780 * phSrc = int(hSrc) * else: * phSrc = int(cudaGraphNode_t(hSrc)) # <<<<<<<<<<<<<< * cyhSrc = phSrc * with nogil: */ /*else*/ { __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_hSrc}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25780, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_t_4 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_5)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF((PyObject *)__pyx_t_5); __pyx_t_5 = 0; __pyx_v_phSrc = 
((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L6:; /* "cuda/bindings/runtime.pyx":25781 * else: * phSrc = int(cudaGraphNode_t(hSrc)) * cyhSrc = phSrc # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphKernelNodeCopyAttributes(cyhSrc, cyhDst) */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phSrc); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25781, __pyx_L1_error) __pyx_v_cyhSrc = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":25782 * phSrc = int(cudaGraphNode_t(hSrc)) * cyhSrc = phSrc * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeCopyAttributes(cyhSrc, cyhDst) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25783 * cyhSrc = phSrc * with nogil: * err = cyruntime.cudaGraphKernelNodeCopyAttributes(cyhSrc, cyhDst) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphKernelNodeCopyAttributes(__pyx_v_cyhSrc, __pyx_v_cyhDst); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25783, __pyx_L10_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":25782 * phSrc = int(cudaGraphNode_t(hSrc)) * cyhSrc = phSrc * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeCopyAttributes(cyhSrc, cyhDst) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L11; } __pyx_L10_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L11:; } } /* "cuda/bindings/runtime.pyx":25784 * with nogil: * err = cyruntime.cudaGraphKernelNodeCopyAttributes(cyhSrc, cyhDst) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); 
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25784, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 25784, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25742 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeCopyAttributes(hSrc, hDst): * """ Copies attributes from source node to destination node. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeCopyAttributes", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phDst); __Pyx_XDECREF(__pyx_v_phSrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25786 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeGetAttribute(hNode, attr not None : cudaKernelNodeAttrID): * """ Queries node attribute. 
*/ /* Python wrapper */ /* NOTE(review): Cython-generated binding for runtime.pyx:25786 `cudaGraphKernelNodeGetAttribute`. Do not hand-edit this C; fix the .pyx source and regenerate. The wrapper parses (hNode, attr), rejects attr=None per the `not None` declaration, then dispatches to the impl. */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_381cudaGraphKernelNodeGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_380cudaGraphKernelNodeGetAttribute, "cudaGraphKernelNodeGetAttribute(hNode, attr: cudaKernelNodeAttrID)\n\nQueries node attribute.\n\nQueries attribute `attr` from node `hNode` and stores it in\ncorresponding member of `value_out`.\n\nParameters\n----------\nhNode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n\nattr : :py:obj:`~.cudaKernelNodeAttrID`\n\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\nvalue_out : :py:obj:`~.cudaKernelNodeAttrValue`\n\n\nSee Also\n--------\n:py:obj:`~.cudaAccessPolicyWindow`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_381cudaGraphKernelNodeGetAttribute = {"cudaGraphKernelNodeGetAttribute", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_381cudaGraphKernelNodeGetAttribute, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_380cudaGraphKernelNodeGetAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_381cudaGraphKernelNodeGetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_hNode = 0; PyObject *__pyx_v_attr = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphKernelNodeGetAttribute 
(wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hNode,&__pyx_mstate_global->__pyx_n_u_attr,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25786, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25786, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25786, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphKernelNodeGetAttribute", 0) < (0)) __PYX_ERR(0, 25786, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeGetAttribute", 1, 2, 2, i); __PYX_ERR(0, 25786, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25786, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25786, __pyx_L3_error) } __pyx_v_hNode = values[0]; __pyx_v_attr = values[1]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeGetAttribute", 1, 2, 2, __pyx_nargs); 
__PYX_ERR(0, 25786, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_attr) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attr"); __PYX_ERR(0, 25787, __pyx_L1_error) } __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_380cudaGraphKernelNodeGetAttribute(__pyx_self, __pyx_v_hNode, __pyx_v_attr); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_380cudaGraphKernelNodeGetAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hNode, PyObject *__pyx_v_attr) { cudaGraphNode_t __pyx_v_cyhNode; PyObject *__pyx_v_phNode = NULL; __pyx_t_4cuda_8bindings_9cyruntime_cudaKernelNodeAttrID __pyx_v_cyattr; struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeAttrValue *__pyx_v_value_out = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; __pyx_t_4cuda_8bindings_9cyruntime_cudaKernelNodeAttrID __pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphKernelNodeGetAttribute", 0); /* "cuda/bindings/runtime.pyx":25812 * """ * cdef cyruntime.cudaGraphNode_t cyhNode * if hNode is None: # <<<<<<<<<<<<<< * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_hNode == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25813 * cdef cyruntime.cudaGraphNode_t cyhNode * if hNode is None: * phNode = 0 # <<<<<<<<<<<<<< * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): * phNode = int(hNode) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phNode = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25812 * """ * cdef cyruntime.cudaGraphNode_t cyhNode * if hNode is None: # <<<<<<<<<<<<<< * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25814 * if hNode is None: * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * phNode = int(hNode) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hNode, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hNode, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25815 * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): * phNode = int(hNode) # <<<<<<<<<<<<<< * else: * phNode = int(cudaGraphNode_t(hNode)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hNode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25815, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phNode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":25814 * if hNode is None: * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): 
# <<<<<<<<<<<<<< * phNode = int(hNode) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25817 * phNode = int(hNode) * else: * phNode = int(cudaGraphNode_t(hNode)) # <<<<<<<<<<<<<< * cyhNode = phNode * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hNode}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25817, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25817, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_phNode = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":25818 * else: * phNode = int(cudaGraphNode_t(hNode)) * cyhNode = phNode # <<<<<<<<<<<<<< * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value * cdef cudaKernelNodeAttrValue value_out = cudaKernelNodeAttrValue() */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phNode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25818, __pyx_L1_error) __pyx_v_cyhNode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":25819 * phNode = int(cudaGraphNode_t(hNode)) * cyhNode = phNode * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value # <<<<<<<<<<<<<< * cdef cudaKernelNodeAttrValue value_out = cudaKernelNodeAttrValue() * with nogil: */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_attr, 
__pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25819, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaLaunchAttributeID)__Pyx_PyLong_As_enum__cudaLaunchAttributeID(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25819, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyattr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":25820 * cyhNode = phNode * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value * cdef cudaKernelNodeAttrValue value_out = cudaKernelNodeAttrValue() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphKernelNodeGetAttribute(cyhNode, cyattr, value_out._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaKernelNodeAttrValue); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaKernelNodeAttrValue); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25820, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_value_out = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeAttrValue *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":25821 * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value * cdef cudaKernelNodeAttrValue value_out = cudaKernelNodeAttrValue() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeGetAttribute(cyhNode, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25822 * cdef cudaKernelNodeAttrValue value_out = cudaKernelNodeAttrValue() * with nogil: * err = cyruntime.cudaGraphKernelNodeGetAttribute(cyhNode, 
cyattr, value_out._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphKernelNodeGetAttribute(__pyx_v_cyhNode, __pyx_v_cyattr, ((__pyx_t_4cuda_8bindings_9cyruntime_cudaKernelNodeAttrValue *)__pyx_v_value_out->__pyx_base._pvt_ptr)); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25822, __pyx_L7_error) __pyx_v_err = __pyx_t_9; } /* "cuda/bindings/runtime.pyx":25821 * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value * cdef cudaKernelNodeAttrValue value_out = cudaKernelNodeAttrValue() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeGetAttribute(cyhNode, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":25823 * with nogil: * err = cyruntime.cudaGraphKernelNodeGetAttribute(cyhNode, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], value_out) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25824 * err = cyruntime.cudaGraphKernelNodeGetAttribute(cyhNode, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], value_out) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = 
__Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25824, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 25824, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 25824, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25823 * with nogil: * err = cyruntime.cudaGraphKernelNodeGetAttribute(cyhNode, cyattr, value_out._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], value_out) */ } /* "cuda/bindings/runtime.pyx":25825 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], value_out) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 25825, __pyx_L1_error); __Pyx_INCREF((PyObject 
*)__pyx_v_value_out); __Pyx_GIVEREF((PyObject *)__pyx_v_value_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_value_out)) != (0)) __PYX_ERR(0, 25825, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25786 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeGetAttribute(hNode, attr not None : cudaKernelNodeAttrID): * """ Queries node attribute. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeGetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phNode); __Pyx_XDECREF((PyObject *)__pyx_v_value_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25827 * return (_dict_cudaError_t[err], value_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeSetAttribute(hNode, attr not None : cudaKernelNodeAttrID, value : Optional[cudaKernelNodeAttrValue]): * """ Sets node attribute. 
 * NOTE(review): Cython-GENERATED code -- do not hand-edit; change runtime.pyx and regenerate. Below: the docstring, PyMethodDef entry, and METH_FASTCALL wrapper for cudaGraphKernelNodeSetAttribute. The wrapper parses (hNode, attr, value) positionally or by keyword via __Pyx_ParseKeywords, raises TypeError if attr is None, type-checks value against cudaKernelNodeAttrValue (None permitted: the 1 flag in __Pyx_ArgTypeTest allows it), then dispatches to the _382 implementation; all paths decref the borrowed values[] array before returning.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_383cudaGraphKernelNodeSetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_382cudaGraphKernelNodeSetAttribute, "cudaGraphKernelNodeSetAttribute(hNode, attr: cudaKernelNodeAttrID, cudaKernelNodeAttrValue value: Optional[cudaKernelNodeAttrValue])\n\nSets node attribute.\n\nSets attribute `attr` on node `hNode` from corresponding attribute of\n`value`.\n\nParameters\n----------\nhNode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n\nattr : :py:obj:`~.cudaKernelNodeAttrID`\n\nvalue : :py:obj:`~.cudaKernelNodeAttrValue`\n\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidResourceHandle`\n\nSee Also\n--------\n:py:obj:`~.cudaAccessPolicyWindow`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_383cudaGraphKernelNodeSetAttribute = {"cudaGraphKernelNodeSetAttribute", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_383cudaGraphKernelNodeSetAttribute, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_382cudaGraphKernelNodeSetAttribute}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_383cudaGraphKernelNodeSetAttribute(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_hNode = 0; PyObject *__pyx_v_attr = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeAttrValue *__pyx_v_value = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphKernelNodeSetAttribute (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_hNode,&__pyx_mstate_global->__pyx_n_u_attr,&__pyx_mstate_global->__pyx_n_u_value,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25827, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25827, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25827, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25827, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphKernelNodeSetAttribute", 0) < (0)) __PYX_ERR(0, 25827, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeSetAttribute", 1, 3, 3, i); __PYX_ERR(0, 25827, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25827, __pyx_L3_error) 
values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25827, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25827, __pyx_L3_error) } __pyx_v_hNode = values[0]; __pyx_v_attr = values[1]; __pyx_v_value = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeAttrValue *)values[2]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphKernelNodeSetAttribute", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 25827, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeSetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(((PyObject *)__pyx_v_attr) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "attr"); __PYX_ERR(0, 25828, __pyx_L1_error) } if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_value), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaKernelNodeAttrValue, 1, "value", 0))) __PYX_ERR(0, 25828, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_382cudaGraphKernelNodeSetAttribute(__pyx_self, __pyx_v_hNode, __pyx_v_attr, __pyx_v_value); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static 
/* NOTE(review): Cython-GENERATED implementation of cudaGraphKernelNodeSetAttribute (runtime.pyx:25827-25864) -- do not hand-edit; regenerate from the .pyx. Mirrors the GetAttribute impl: coerces hNode to an integer handle (0 for None; int(hNode) for cudaGraphNode_t/driver.CUgraphNode; else int(cudaGraphNode_t(hNode))), converts attr.value to the attribute-id enum, takes value._pvt_ptr (or NULL when value is None), calls cyruntime.cudaGraphKernelNodeSetAttribute with the GIL released, and returns the 1-tuple (_dict_cudaError_t[err],). The `static` storage-class keyword for this definition sits at the end of the preceding line. */
PyObject *__pyx_pf_4cuda_8bindings_7runtime_382cudaGraphKernelNodeSetAttribute(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_hNode, PyObject *__pyx_v_attr, struct __pyx_obj_4cuda_8bindings_7runtime_cudaKernelNodeAttrValue *__pyx_v_value) { cudaGraphNode_t __pyx_v_cyhNode; PyObject *__pyx_v_phNode = NULL; __pyx_t_4cuda_8bindings_9cyruntime_cudaKernelNodeAttrID __pyx_v_cyattr; __pyx_t_4cuda_8bindings_9cyruntime_cudaKernelNodeAttrValue *__pyx_v_cyvalue_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; __pyx_t_4cuda_8bindings_9cyruntime_cudaKernelNodeAttrID __pyx_t_8; union cudaLaunchAttributeValue *__pyx_t_9; cudaError_t __pyx_t_10; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphKernelNodeSetAttribute", 0); /* "cuda/bindings/runtime.pyx":25853 * """ * cdef cyruntime.cudaGraphNode_t cyhNode * if hNode is None: # <<<<<<<<<<<<<< * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_hNode == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25854 * cdef cyruntime.cudaGraphNode_t cyhNode * if hNode is None: * phNode = 0 # <<<<<<<<<<<<<< * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): * phNode = int(hNode) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_phNode = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25853 * """ * cdef cyruntime.cudaGraphNode_t cyhNode * if hNode is None: # <<<<<<<<<<<<<< * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25855 * if hNode is None: * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * phNode = int(hNode) * else: */ __pyx_t_2 = 
__Pyx_TypeCheck(__pyx_v_hNode, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_hNode, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":25856 * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): * phNode = int(hNode) # <<<<<<<<<<<<<< * else: * phNode = int(cudaGraphNode_t(hNode)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_hNode); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_phNode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":25855 * if hNode is None: * phNode = 0 * elif isinstance(hNode, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * phNode = int(hNode) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":25858 * phNode = int(hNode) * else: * phNode = int(cudaGraphNode_t(hNode)) # <<<<<<<<<<<<<< * cyhNode = phNode * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_hNode}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25858, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25858, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; 
__pyx_v_phNode = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":25859 * else: * phNode = int(cudaGraphNode_t(hNode)) * cyhNode = phNode # <<<<<<<<<<<<<< * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value * cdef cyruntime.cudaKernelNodeAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_phNode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25859, __pyx_L1_error) __pyx_v_cyhNode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":25860 * phNode = int(cudaGraphNode_t(hNode)) * cyhNode = phNode * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value # <<<<<<<<<<<<<< * cdef cyruntime.cudaKernelNodeAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL * with nogil: */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_attr, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25860, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_8 = ((enum cudaLaunchAttributeID)__Pyx_PyLong_As_enum__cudaLaunchAttributeID(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 25860, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_cyattr = __pyx_t_8; /* "cuda/bindings/runtime.pyx":25861 * cyhNode = phNode * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value * cdef cyruntime.cudaKernelNodeAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphKernelNodeSetAttribute(cyhNode, cyattr, cyvalue_ptr) */ __pyx_t_1 = (((PyObject *)__pyx_v_value) != Py_None); if (__pyx_t_1) { __pyx_t_9 = __pyx_v_value->__pyx_base._pvt_ptr; } else { __pyx_t_9 = NULL; } __pyx_v_cyvalue_ptr = __pyx_t_9; /* "cuda/bindings/runtime.pyx":25862 * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value * cdef cyruntime.cudaKernelNodeAttrValue* cyvalue_ptr = 
value._pvt_ptr if value is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeSetAttribute(cyhNode, cyattr, cyvalue_ptr) * return (_dict_cudaError_t[err],) */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25863 * cdef cyruntime.cudaKernelNodeAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL * with nogil: * err = cyruntime.cudaGraphKernelNodeSetAttribute(cyhNode, cyattr, cyvalue_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */ __pyx_t_10 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphKernelNodeSetAttribute(__pyx_v_cyhNode, __pyx_v_cyattr, __pyx_v_cyvalue_ptr); if (unlikely(__pyx_t_10 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25863, __pyx_L7_error) __pyx_v_err = __pyx_t_10; } /* "cuda/bindings/runtime.pyx":25862 * cdef cyruntime.cudaKernelNodeAttrID cyattr = attr.value * cdef cyruntime.cudaKernelNodeAttrValue* cyvalue_ptr = value._pvt_ptr if value is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphKernelNodeSetAttribute(cyhNode, cyattr, cyvalue_ptr) * return (_dict_cudaError_t[err],) */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":25864 * with nogil: * err = cyruntime.cudaGraphKernelNodeSetAttribute(cyhNode, cyattr, cyvalue_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = 
__Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25864, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 25864, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25827 * return (_dict_cudaError_t[err], value_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphKernelNodeSetAttribute(hNode, attr not None : cudaKernelNodeAttrID, value : Optional[cudaKernelNodeAttrValue]): * """ Sets node attribute. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphKernelNodeSetAttribute", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_phNode); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25866 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddMemcpyNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pCopyParams : Optional[cudaMemcpy3DParms]): * """ Creates a memcpy node and adds it to a graph. 
 * NOTE(review): Cython-GENERATED code -- do not hand-edit; change runtime.pyx and regenerate. Below: docstring, PyMethodDef entry, and METH_FASTCALL wrapper for cudaGraphAddMemcpyNode(graph, pDependencies, numDependencies, pCopyParams). The wrapper parses four arguments positionally or by keyword, converts numDependencies via __Pyx_PyLong_As_size_t, type-checks pCopyParams against cudaMemcpy3DParms (None permitted), and dispatches to the _384 implementation. The trailing generator prototype and the start of the _384 implementation body continue past this region.
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_385cudaGraphAddMemcpyNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_384cudaGraphAddMemcpyNode, "cudaGraphAddMemcpyNode(graph, pDependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, cudaMemcpy3DParms pCopyParams: Optional[cudaMemcpy3DParms])\n\nCreates a memcpy node and adds it to a graph.\n\nCreates a new memcpy node and adds it to `graph` with `numDependencies`\ndependencies specified via `pDependencies`. It is possible for\n`numDependencies` to be 0, in which case the node will be placed at the\nroot of the graph. `pDependencies` may not have any duplicate entries.\nA handle to the new node will be returned in `pGraphNode`.\n\nWhen the graph is launched, the node will perform the memcpy described\nby `pCopyParams`. 
See :py:obj:`~.cudaMemcpy3D()` for a description of\nthe structure and its restrictions.\n\nMemcpy nodes have some additional restrictions with regards to managed\nmemory, if the system contains at least one device which has a zero\nvalue for the device attribute\n:py:obj:`~.cudaDevAttrConcurrentManagedAccess`.\n\nParameters\n----------\ngraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n Graph to which to add the node\npDependencies : list[:py:obj:`~.cudaGraphNode_t`]\n Dependencies of the node\nnumDependencies : size_t\n Number of dependencies\npCopyParams : :py:obj:`~.cudaMemcpy3DParms`\n Parameters for the memory copy\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npGraphNode : :py:obj:`~.cudaGraphNode_t`\n Returns newly created node\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaGraphAddMemcpyNodeToSymbol`, :py:obj:`~.cudaGraphAddMemcpyNodeFromSymbol`, :py:obj:`~.cudaGraphAddMemcpyNode1D`, :py:obj:`~.cudaGraphMemcpyNodeGetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~"".cudaGraphAddMemsetNode`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_385cudaGraphAddMemcpyNode = {"cudaGraphAddMemcpyNode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_385cudaGraphAddMemcpyNode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_384cudaGraphAddMemcpyNode}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_385cudaGraphAddMemcpyNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_graph = 0; PyObject *__pyx_v_pDependencies = 0; 
size_t __pyx_v_numDependencies; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *__pyx_v_pCopyParams = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphAddMemcpyNode (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_graph_2,&__pyx_mstate_global->__pyx_n_u_pDependencies,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_pCopyParams,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? 
__Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25866, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 25866, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25866, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25866, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25866, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphAddMemcpyNode", 0) < (0)) __PYX_ERR(0, 25866, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphAddMemcpyNode", 1, 4, 4, i); __PYX_ERR(0, 25866, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25866, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25866, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25866, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 25866, __pyx_L3_error) } __pyx_v_graph = values[0]; 
__pyx_v_pDependencies = values[1]; __pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 25867, __pyx_L3_error) __pyx_v_pCopyParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *)values[3]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphAddMemcpyNode", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 25866, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddMemcpyNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pCopyParams), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpy3DParms, 1, "pCopyParams", 0))) __PYX_ERR(0, 25867, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_384cudaGraphAddMemcpyNode(__pyx_self, __pyx_v_graph, __pyx_v_pDependencies, __pyx_v_numDependencies, __pyx_v_pCopyParams); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemcpyNode_2generator95(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":25908 * """ * pDependencies = [] if pDependencies is 
None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */
/* NOTE(review): Cython-generated code (cythonize output for runtime.pyx) -- do not edit by
 * hand; regenerate from the .pyx source instead.
 *
 * Closure factory for the generator expression inside cudaGraphAddMemcpyNode:
 *     all(isinstance(_x, (cudaGraphNode_t, driver.CUgraphNode)) for _x in pDependencies)
 * It heap-allocates a scope struct, stores the iterated argument (pDependencies) in it as
 * __pyx_genexpr_arg_0, and wraps the generator body below in a coroutine object.
 * Returns a new reference to the generator, or NULL with an exception set. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaGraphAddMemcpyNode_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_95_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0);
/* Allocate the closure scope that will hold the iterable and the loop variable _x. */
__pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_95_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_95_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_95_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_95_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 25908, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); }
/* Capture the argument (pDependencies) into the scope; the scope owns one reference. */
__pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemcpyNode_2generator95, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[95]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaGraphAddMemcpyNode_locals_ge, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 25908, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddMemcpyNode.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Generator body for the genexpr above. Cython inlines the all() reduction: iterate the
 * captured pDependencies; if any element is neither a runtime cudaGraphNode_t nor a
 * driver CUgraphNode, return Py_False immediately, otherwise Py_True after exhaustion.
 * resume_label handles the coroutine protocol (only a single first run is valid here). */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemcpyNode_2generator95(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_95_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_95_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 25908, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 25908, __pyx_L1_error) }
/* Fast path for exact list/tuple (index directly); otherwise fall back to the iterator
 * protocol via PyObject_GetIter and a cached tp_iternext (or PyIter_Next on Limited API). */
if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25908, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 25908, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 25908, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; }
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2));
#else
__pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2);
#endif
++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25908, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 25908, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0;
/* isinstance(_x, (cudaGraphNode_t, driver.CUgraphNode)) as a short-circuit OR of two
 * exact/subtype checks; a failed check short-circuits the all() to False. */
__pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L7_bool_binop_done; } __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_5 = __pyx_t_6; __pyx_L7_bool_binop_done:; __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r);
#if !CYTHON_USE_EXC_INFO_STACK
__Pyx_Coroutine_ResetAndClearException(__pyx_generator);
#endif
/* Mark the generator finished and drop its closure before returning. */
__pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25866 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddMemcpyNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pCopyParams : Optional[cudaMemcpy3DParms]): * """ Creates a memcpy node and adds it to a graph.
*/ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_384cudaGraphAddMemcpyNode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_graph, PyObject *__pyx_v_pDependencies, size_t __pyx_v_numDependencies, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *__pyx_v_pCopyParams) { cudaGraph_t __pyx_v_cygraph; PyObject *__pyx_v_pgraph = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *__pyx_v_pGraphNode = 0; cudaGraphNode_t *__pyx_v_cypDependencies; Py_ssize_t __pyx_v_idx; struct cudaMemcpy3DParms *__pyx_v_cypCopyParams_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemcpyNode_2generator95 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaMemcpy3DParms *__pyx_t_14; cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphAddMemcpyNode", 0); __Pyx_INCREF(__pyx_v_pDependencies); /* "cuda/bindings/runtime.pyx":25907 * :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaGraphAddMemcpyNodeToSymbol`, :py:obj:`~.cudaGraphAddMemcpyNodeFromSymbol`, :py:obj:`~.cudaGraphAddMemcpyNode1D`, :py:obj:`~.cudaGraphMemcpyNodeGetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemsetNode` * """ * pDependencies = [] if pDependencies is None else pDependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) 
for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_2 = (__pyx_v_pDependencies == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25907, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_pDependencies); __pyx_t_1 = __pyx_v_pDependencies; } __Pyx_DECREF_SET(__pyx_v_pDependencies, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":25908 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_22cudaGraphAddMemcpyNode_genexpr(NULL, __pyx_v_pDependencies); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25908, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 25908, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":25909 * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # 
<<<<<<<<<<<<<< * cdef cyruntime.cudaGraph_t cygraph * if graph is None: */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_pDependencies_is_not_in}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25909, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 25909, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":25908 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ } /* "cuda/bindings/runtime.pyx":25911 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_4 = (__pyx_v_graph == Py_None); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25912 * cdef cyruntime.cudaGraph_t cygraph * if graph is None: * pgraph = 0 # <<<<<<<<<<<<<< * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pgraph = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":25911 * raise TypeError("Argument 'pDependencies' is not instance of type 
(expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":25913 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_4 = __pyx_t_2; __pyx_L5_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25914 * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) # <<<<<<<<<<<<<< * else: * pgraph = int(cudaGraph_t(graph)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_graph); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25914, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pgraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":25913 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":25916 * pgraph = int(graph) * else: * pgraph = int(cudaGraph_t(graph)) # <<<<<<<<<<<<<< * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_graph}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | 
(__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25916, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_1 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pgraph = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":25917 * else: * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph # <<<<<<<<<<<<<< * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pgraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 25917, __pyx_L1_error) __pyx_v_cygraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":25918 * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25918, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pGraphNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1); __pyx_t_1 = 0; /* 
"cuda/bindings/runtime.pyx":25919 * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL # <<<<<<<<<<<<<< * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cypDependencies = NULL; /* "cuda/bindings/runtime.pyx":25920 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25920, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25921 * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25921, __pyx_L1_error) __pyx_v_cypDependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":25922 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_4 = (__pyx_v_cypDependencies == NULL); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":25923 * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is 
NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(pDependencies)): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25923, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 25923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); 
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25923, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 25923, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":25922 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":25925 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(pDependencies)): # <<<<<<<<<<<<<< * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25925, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":25926 * else: * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25926, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (__pyx_v_cypDependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr[0])); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } /* "cuda/bindings/runtime.pyx":25920 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef 
cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":25927 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25927, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 == 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25928 * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaMemcpy3DParms* cypCopyParams_ptr = pCopyParams._pvt_ptr if pCopyParams is not None else NULL */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cypDependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":25927 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ } __pyx_L7:; /* 
"cuda/bindings/runtime.pyx":25929 * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpy3DParms* cypCopyParams_ptr = pCopyParams._pvt_ptr if pCopyParams is not None else NULL * with nogil: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25929, __pyx_L1_error) __pyx_t_4 = (__pyx_v_numDependencies > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_4)) { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_9 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25929, __pyx_L1_error) __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 25929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 25929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 25929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, 
__pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25929, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 25929, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":25930 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaMemcpy3DParms* cypCopyParams_ptr = pCopyParams._pvt_ptr if pCopyParams is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphAddMemcpyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypCopyParams_ptr) */ __pyx_t_4 = (((PyObject *)__pyx_v_pCopyParams) != Py_None); if (__pyx_t_4) { __pyx_t_14 = __pyx_v_pCopyParams->_pvt_ptr; } else { __pyx_t_14 = NULL; } __pyx_v_cypCopyParams_ptr = __pyx_t_14; /* "cuda/bindings/runtime.pyx":25931 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaMemcpy3DParms* cypCopyParams_ptr = pCopyParams._pvt_ptr if pCopyParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddMemcpyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypCopyParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS 
__Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":25932 * cdef cyruntime.cudaMemcpy3DParms* cypCopyParams_ptr = pCopyParams._pvt_ptr if pCopyParams is not None else NULL * with nogil: * err = cyruntime.cudaGraphAddMemcpyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypCopyParams_ptr) # <<<<<<<<<<<<<< * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphAddMemcpyNode(((cudaGraphNode_t *)__pyx_v_pGraphNode->__pyx_base._pvt_ptr), __pyx_v_cygraph, __pyx_v_cypDependencies, __pyx_v_numDependencies, __pyx_v_cypCopyParams_ptr); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 25932, __pyx_L13_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":25931 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaMemcpy3DParms* cypCopyParams_ptr = pCopyParams._pvt_ptr if pCopyParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddMemcpyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypCopyParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L14; } __pyx_L13_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L14:; } } /* "cuda/bindings/runtime.pyx":25933 * with nogil: * err = cyruntime.cudaGraphAddMemcpyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypCopyParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 25933, __pyx_L1_error) __pyx_t_2 = 
(__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L16_bool_binop_done; } __pyx_t_2 = (__pyx_v_cypDependencies != NULL); __pyx_t_4 = __pyx_t_2; __pyx_L16_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25934 * err = cyruntime.cudaGraphAddMemcpyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypCopyParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cypDependencies); /* "cuda/bindings/runtime.pyx":25933 * with nogil: * err = cyruntime.cudaGraphAddMemcpyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypCopyParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":25935 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":25936 * free(cypDependencies) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraphNode) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25936, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25936, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25936, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; 
__pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25936, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 25936, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None) != (0)) __PYX_ERR(0, 25936, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25935 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ } /* "cuda/bindings/runtime.pyx":25937 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 25937, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25937, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25937, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25937, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 25937, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraphNode); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraphNode); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_pGraphNode)) != (0)) __PYX_ERR(0, 25937, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto 
__pyx_L0; /* "cuda/bindings/runtime.pyx":25866 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddMemcpyNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pCopyParams : Optional[cudaMemcpy3DParms]): * """ Creates a memcpy node and adds it to a graph. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddMemcpyNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pgraph); __Pyx_XDECREF((PyObject *)__pyx_v_pGraphNode); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemcpyNode_2generator95); __Pyx_XDECREF(__pyx_v_pDependencies); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":25939 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddMemcpyNode1D(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, dst, src, size_t count, kind not None : cudaMemcpyKind): * """ Creates a 1D memcpy node and adds it to a graph. 
*/
/* Python wrapper */
/* NOTE(review): This translation unit is Cython-generated C for
   cuda/bindings/runtime.pyx. Do not hand-edit; fix the .pyx source and
   regenerate. The comments below are reader annotations only. */
/* Forward declaration of the CPython-level wrapper for
   cudaGraphAddMemcpyNode1D; the parameter list depends on whether the
   METH_FASTCALL calling convention is available. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_387cudaGraphAddMemcpyNode1D(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Python-visible __doc__ for cudaGraphAddMemcpyNode1D: embedded signature
   (embedsignature directive) followed by the numpydoc body from the .pyx. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_386cudaGraphAddMemcpyNode1D, "cudaGraphAddMemcpyNode1D(graph, pDependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, dst, src, size_t count, kind: cudaMemcpyKind)\n\nCreates a 1D memcpy node and adds it to a graph.\n\nCreates a new 1D memcpy node and adds it to `graph` with\n`numDependencies` dependencies specified via `pDependencies`. It is\npossible for `numDependencies` to be 0, in which case the node will be\nplaced at the root of the graph. `pDependencies` may not have any\nduplicate entries. A handle to the new node will be returned in\n`pGraphNode`.\n\nWhen the graph is launched, the node will copy `count` bytes from the\nmemory area pointed to by `src` to the memory area pointed to by `dst`,\nwhere `kind` specifies the direction of the copy, and must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. Launching a memcpy node with dst and src\npointers that do not match the direction of the copy results in an\nundefined behavior.\n\nMemcpy nodes have some additional restrictions with regards to managed\nmemory, if the system contains at least one device which has a zero\nvalue for the device attribute\n:py:obj:`~.cudaDevAttrConcurrentManagedAccess`.\n\nParameters\n----------\ngraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n Graph to which to add the node\npDependencies : list[:py:obj:`~.cudaGraphNode_t`]\n Dependencies of the node\nnumDependencies : size_t\n Number of dependencies\ndst : Any\n Destination memory address\nsrc : Any\n Source memory address\ncount : size_t\n Size in bytes to copy\nkind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer""\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npGraphNode : :py:obj:`~.cudaGraphNode_t`\n Returns newly created node\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeGetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams1D`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemsetNode`");
/* Method-table entry binding the Python name "cudaGraphAddMemcpyNode1D" to
   the fastcall wrapper and the docstring above. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_387cudaGraphAddMemcpyNode1D = {"cudaGraphAddMemcpyNode1D", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_387cudaGraphAddMemcpyNode1D, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_386cudaGraphAddMemcpyNode1D};
/* Wrapper body: unpacks the seven arguments (graph, pDependencies,
   numDependencies, dst, src, count, kind) from positional/keyword form,
   converts numDependencies and count to size_t, rejects kind=None (declared
   "kind not None" in the .pyx signature), then forwards to the
   implementation __pyx_pf_4cuda_8bindings_7runtime_386cudaGraphAddMemcpyNode1D. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_387cudaGraphAddMemcpyNode1D(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
PyObject *__pyx_v_graph = 0; PyObject *__pyx_v_pDependencies = 0; size_t __pyx_v_numDependencies; PyObject *__pyx_v_dst = 0; PyObject *__pyx_v_src = 0; size_t __pyx_v_count; PyObject *__pyx_v_kind = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[7] = {0,0,0,0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphAddMemcpyNode1D (wrapper)", 0);
/* Without METH_FASTCALL the positional count must be read from the args tuple. */
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* Argument unpacking. Keyword path: the switch falls through to collect the
   positionals already supplied, then __Pyx_ParseKeywords fills the rest by
   name; the fast path requires exactly 7 positional arguments. */
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_graph_2,&__pyx_mstate_global->__pyx_n_u_pDependencies,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_kind_2,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 25939, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
switch (__pyx_nargs) {
case 7: values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 25939, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 6: values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 25939, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 25939, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 25939, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25939, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25939, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25939, __pyx_L3_error) CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphAddMemcpyNode1D", 0) < (0)) __PYX_ERR(0, 25939, __pyx_L3_error)
for (Py_ssize_t i = __pyx_nargs; i < 7; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphAddMemcpyNode1D", 1, 7, 7, i); __PYX_ERR(0, 25939, __pyx_L3_error) } }
} else if (unlikely(__pyx_nargs != 7)) { goto __pyx_L5_argtuple_error; } else {
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 25939, __pyx_L3_error)
values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 25939, __pyx_L3_error)
values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 25939, __pyx_L3_error)
values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 25939, __pyx_L3_error)
values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 25939, __pyx_L3_error)
values[5] = __Pyx_ArgRef_FASTCALL(__pyx_args, 5); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[5])) __PYX_ERR(0, 25939, __pyx_L3_error)
values[6] = __Pyx_ArgRef_FASTCALL(__pyx_args, 6); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[6])) __PYX_ERR(0, 25939, __pyx_L3_error)
}
/* Convert collected PyObject* values to the declared C/Python locals; the
   two size_t conversions report errors via the (size_t)-1 + PyErr_Occurred
   convention. Error positions 25939/25940 refer to runtime.pyx lines. */
__pyx_v_graph = values[0];
__pyx_v_pDependencies = values[1];
__pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 25940, __pyx_L3_error)
__pyx_v_dst = values[3];
__pyx_v_src = values[4];
__pyx_v_count = __Pyx_PyLong_As_size_t(values[5]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 25940, __pyx_L3_error)
__pyx_v_kind = values[6];
}
goto __pyx_L6_skip;
/* Wrong positional count: raise TypeError via __Pyx_RaiseArgtupleInvalid. */
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphAddMemcpyNode1D", 1, 7, 7, __pyx_nargs); __PYX_ERR(0, 25939, __pyx_L3_error)
__pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done;
/* Unpacking failed: drop any collected argument references and return NULL. */
__pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddMemcpyNode1D", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL;
__pyx_L4_argument_unpacking_done:;
/* Enforce the "kind not None" annotation from the .pyx signature. */
if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 25940, __pyx_L1_error) }
/* Dispatch to the actual implementation function. */
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_386cudaGraphAddMemcpyNode1D(__pyx_self, __pyx_v_graph, __pyx_v_pDependencies, __pyx_v_numDependencies, __pyx_v_dst, __pyx_v_src, __pyx_v_count, __pyx_v_kind);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up;
__pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); }
__pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r;
}
/* Generator body implementing the inlined genexpr below (proto). */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_24cudaGraphAddMemcpyNode1D_2generator96(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
/* "cuda/bindings/runtime.pyx":25997
 * """
 * pDependencies = [] if pDependencies is None else pDependencies
 * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies):             # <<<<<<<<<<<<<<
 *     raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]")
 * cdef cyruntime.cudaGraph_t cygraph
 */
/* Factory: allocates the closure scope struct, stores the iterable argument
   in it, and creates the generator object for the genexpr above. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_24cudaGraphAddMemcpyNode1D_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_96_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_96_genexpr
*)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_96_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_96_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL);
/* NOTE(review): Cython-generated continuation of the genexpr factory for
   cudaGraphAddMemcpyNode1D (runtime.pyx:25997). Do not hand-edit. */
if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_96_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 25997, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); }
/* Stash the iterable (pDependencies) in the closure scope for the generator. */
__pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0);
{ __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_24cudaGraphAddMemcpyNode1D_2generator96, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[96]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaGraphAddMemcpyNode1D_locals, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 25997, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; }
/* function exit code */
__pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddMemcpyNode1D.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r;
}
/* Generator body for the inlined all(isinstance(_x, (cudaGraphNode_t,
   driver.CUgraphNode)) for _x in pDependencies) check: runs once (resume
   label 0 only), iterating the closure's iterable with a fast path for
   exact list/tuple and a generic iterator otherwise; yields no values --
   it returns Py_False at the first element failing the type check, or
   Py_True after exhausting the iterable. */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_24cudaGraphAddMemcpyNode1D_2generator96(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */
{
struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_96_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_96_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *);
PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0);
switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; }
__pyx_L3_first_run:;
if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 25997, __pyx_L1_error)
if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 25997, __pyx_L1_error) }
/* Fast path: iterate exact list/tuple by index; otherwise take a real iterator. */
if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25997, __pyx_L1_error) }
for (;;) {
if (likely(!__pyx_t_3)) {
if (likely(PyList_CheckExact(__pyx_t_1))) {
{ Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 25997, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; }
__pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2);
++__pyx_t_2;
} else {
{ Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 25997, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; }
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2));
#else
__pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2);
#endif
++__pyx_t_2;
}
if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25997, __pyx_L1_error)
} else {
/* Generic iterator path: NULL from tp_iternext means StopIteration (clear
   it and stop) or a real error (propagate). */
__pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 25997, __pyx_L1_error) PyErr_Clear(); } break; }
}
__Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0;
/* isinstance(_x, (cudaGraphNode_t, driver.CUgraphNode)) with short-circuit OR. */
__pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L7_bool_binop_done; }
__pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_5 = __pyx_t_6;
__pyx_L7_bool_binop_done:;
__pyx_t_6 = (!__pyx_t_5);
/* First element failing the check: the inlined all(...) result is False. */
if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; }
}
__Pyx_DECREF(__pyx_t_1);
__pyx_t_1 = 0;
/* Loop exhausted without a failure: all(...) is True. */
/*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; }
CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); }
__pyx_L0:; __Pyx_XGIVEREF(__pyx_r);
#if !CYTHON_USE_EXC_INFO_STACK
__Pyx_Coroutine_ResetAndClearException(__pyx_generator);
#endif
__pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":25939
 *     return (_dict_cudaError_t[err], pGraphNode)
 * 
 * @cython.embedsignature(True)             # <<<<<<<<<<<<<<
 * def cudaGraphAddMemcpyNode1D(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, dst, src, size_t count, kind not None : cudaMemcpyKind):
 *     """ Creates a 1D memcpy node and adds it to a graph.
*/ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_386cudaGraphAddMemcpyNode1D(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_graph, PyObject *__pyx_v_pDependencies, size_t __pyx_v_numDependencies, PyObject *__pyx_v_dst, PyObject *__pyx_v_src, size_t __pyx_v_count, PyObject *__pyx_v_kind) { cudaGraph_t __pyx_v_cygraph; PyObject *__pyx_v_pgraph = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *__pyx_v_pGraphNode = 0; cudaGraphNode_t *__pyx_v_cypDependencies; Py_ssize_t __pyx_v_idx; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL; void *__pyx_v_cydst_ptr; struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL; void *__pyx_v_cysrc_ptr; enum cudaMemcpyKind __pyx_v_cykind; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_24cudaGraphAddMemcpyNode1D_2generator96 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; enum cudaMemcpyKind __pyx_t_14; cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphAddMemcpyNode1D", 0); __Pyx_INCREF(__pyx_v_pDependencies); /* "cuda/bindings/runtime.pyx":25996 * :py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeGetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphMemcpyNodeSetParams1D`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemsetNode` * """ * pDependencies 
= [] if pDependencies is None else pDependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_2 = (__pyx_v_pDependencies == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25996, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_pDependencies); __pyx_t_1 = __pyx_v_pDependencies; } __Pyx_DECREF_SET(__pyx_v_pDependencies, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":25997 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_24cudaGraphAddMemcpyNode1D_genexpr(NULL, __pyx_v_pDependencies); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 25997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25997, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 25997, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":25998 * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance 
of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraph_t cygraph * if graph is None: */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_pDependencies_is_not_in}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25998, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 25998, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":25997 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ } /* "cuda/bindings/runtime.pyx":26000 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_4 = (__pyx_v_graph == Py_None); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26001 * cdef cyruntime.cudaGraph_t cygraph * if graph is None: * pgraph = 0 # <<<<<<<<<<<<<< * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pgraph = 
__pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26000 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":26002 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_4 = __pyx_t_2; __pyx_L5_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26003 * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) # <<<<<<<<<<<<<< * else: * pgraph = int(cudaGraph_t(graph)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_graph); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26003, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pgraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26002 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":26005 * pgraph = int(graph) * else: * pgraph = int(cudaGraph_t(graph)) # <<<<<<<<<<<<<< * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject 
*__pyx_callargs[2] = {__pyx_t_5, __pyx_v_graph}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26005, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_1 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26005, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pgraph = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":26006 * else: * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph # <<<<<<<<<<<<<< * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pgraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26006, __pyx_L1_error) __pyx_v_cygraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26007 * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26007, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } 
__pyx_v_pGraphNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26008 * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL # <<<<<<<<<<<<<< * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cypDependencies = NULL; /* "cuda/bindings/runtime.pyx":26009 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26009, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26010 * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26010, __pyx_L1_error) __pyx_v_cypDependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":26011 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_4 = (__pyx_v_cypDependencies == NULL); if (unlikely(__pyx_t_4)) { /* 
"cuda/bindings/runtime.pyx":26012 * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(pDependencies)): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26012, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26012, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26012, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26012, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26012, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26012, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26012, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26012, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_1 
= __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26012, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 26012, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26011 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":26014 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(pDependencies)): # <<<<<<<<<<<<<< * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26014, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":26015 * else: * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26015, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (__pyx_v_cypDependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr[0])); 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } /* "cuda/bindings/runtime.pyx":26009 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":26016 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * cydst = _HelperInputVoidPtr(dst) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26016, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 == 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26017 * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26017, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cypDependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26016 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * cydst = _HelperInputVoidPtr(dst) */ } __pyx_L7:; /* "cuda/bindings/runtime.pyx":26018 * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr * cydst = _HelperInputVoidPtr(dst) # <<<<<<<<<<<<<< * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) */ __pyx_t_3 = NULL; 
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_9 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26018, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26019 * cypDependencies = (pDependencies[0])._pvt_ptr * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr # <<<<<<<<<<<<<< * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26019, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26019, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26020 * cydst = _HelperInputVoidPtr(dst) * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) # <<<<<<<<<<<<<< * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value */ __pyx_t_9 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_9, 
__pyx_v_src}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26020, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26021 * cdef void* cydst_ptr = cydst.cptr * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26021, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_1); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26021, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26022 * cysrc = _HelperInputVoidPtr(src) * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphAddMemcpyNode1D(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cydst_ptr, cysrc_ptr, count, cykind) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26022, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_14 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 26022, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_cykind = __pyx_t_14; /* "cuda/bindings/runtime.pyx":26023 * cdef void* cysrc_ptr = cysrc.cptr * cdef 
cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddMemcpyNode1D(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cydst_ptr, cysrc_ptr, count, cykind) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26024 * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: * err = cyruntime.cudaGraphAddMemcpyNode1D(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cydst_ptr, cysrc_ptr, count, cykind) # <<<<<<<<<<<<<< * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphAddMemcpyNode1D(((cudaGraphNode_t *)__pyx_v_pGraphNode->__pyx_base._pvt_ptr), __pyx_v_cygraph, __pyx_v_cypDependencies, __pyx_v_numDependencies, __pyx_v_cydst_ptr, __pyx_v_cysrc_ptr, __pyx_v_count, __pyx_v_cykind); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26024, __pyx_L12_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":26023 * cdef void* cysrc_ptr = cysrc.cptr * cdef cyruntime.cudaMemcpyKind cykind = kind.value * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddMemcpyNode1D(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cydst_ptr, cysrc_ptr, count, cykind) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L13; } __pyx_L12_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L13:; } } /* "cuda/bindings/runtime.pyx":26025 * with nogil: * err = cyruntime.cudaGraphAddMemcpyNode1D(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cydst_ptr, cysrc_ptr, count, cykind) * if len(pDependencies) > 1 and cypDependencies is not NULL: # 
<<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26025, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = (__pyx_v_cypDependencies != NULL); __pyx_t_4 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26026 * err = cyruntime.cudaGraphAddMemcpyNode1D(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cydst_ptr, cysrc_ptr, count, cykind) * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cypDependencies); /* "cuda/bindings/runtime.pyx":26025 * with nogil: * err = cyruntime.cudaGraphAddMemcpyNode1D(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cydst_ptr, cysrc_ptr, count, cykind) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":26027 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26028 * free(cypDependencies) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraphNode) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26028, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_9) != (0)) __PYX_ERR(0, 26028, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None) != (0)) __PYX_ERR(0, 26028, __pyx_L1_error); __pyx_t_9 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26027 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ } /* "cuda/bindings/runtime.pyx":26029 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_3, __pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 26029, __pyx_L1_error); 
__Pyx_INCREF((PyObject *)__pyx_v_pGraphNode); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraphNode); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, ((PyObject *)__pyx_v_pGraphNode)) != (0)) __PYX_ERR(0, 26029, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":25939 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddMemcpyNode1D(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, dst, src, size_t count, kind not None : cudaMemcpyKind): * """ Creates a 1D memcpy node and adds it to a graph. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddMemcpyNode1D", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pgraph); __Pyx_XDECREF((PyObject *)__pyx_v_pGraphNode); __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_24cudaGraphAddMemcpyNode1D_2generator96); __Pyx_XDECREF(__pyx_v_pDependencies); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26031 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphMemcpyNodeGetParams(node): * """ Returns a memcpy node's parameters. 
*/
/* NOTE(review): this file is machine-generated by Cython from
   "cuda/bindings/runtime.pyx" (the quoted pyx lines appear in the interleaved
   comments).  Do not hand-edit the logic here — fix the .pyx source and
   regenerate.  The comments added below only annotate the generated structure. */
/* Python wrapper */
/* Prototype, PyDoc_STRVAR docstring, and PyMethodDef table entry for the
   module-level cudaGraphMemcpyNodeGetParams(node) function, registered with
   METH_FASTCALL|METH_KEYWORDS. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_389cudaGraphMemcpyNodeGetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_388cudaGraphMemcpyNodeGetParams, "cudaGraphMemcpyNodeGetParams(node)\n\nReturns a memcpy node's parameters.\n\nReturns the parameters of memcpy node `node` in `pNodeParams`.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to get the parameters for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npNodeParams : :py:obj:`~.cudaMemcpy3DParms`\n Pointer to return the parameters\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_389cudaGraphMemcpyNodeGetParams = {"cudaGraphMemcpyNodeGetParams", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_389cudaGraphMemcpyNodeGetParams, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_388cudaGraphMemcpyNodeGetParams};
/* Wrapper entry point: unpacks exactly one argument ("node", positional or
   keyword), raising TypeError via __Pyx_RaiseArgtupleInvalid on arity errors,
   then delegates to the __pyx_pf_ implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_389cudaGraphMemcpyNodeGetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_node = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphMemcpyNodeGetParams (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26031, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26031, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphMemcpyNodeGetParams", 0) < (0)) __PYX_ERR(0, 26031, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphMemcpyNodeGetParams", 1, 1, 1, i); __PYX_ERR(0, 26031, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26031, __pyx_L3_error) } __pyx_v_node = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphMemcpyNodeGetParams", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 26031, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemcpyNodeGetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r =
__pyx_pf_4cuda_8bindings_7runtime_388cudaGraphMemcpyNodeGetParams(__pyx_self, __pyx_v_node); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of cudaGraphMemcpyNodeGetParams(node): coerces `node`
   (None -> 0; cudaGraphNode_t / driver.CUgraphNode -> int(node); anything
   else -> int(cudaGraphNode_t(node))) into a cyruntime.cudaGraphNode_t handle,
   allocates a fresh cudaMemcpy3DParms wrapper object, invokes the C runtime
   cudaGraphMemcpyNodeGetParams with the GIL released, and returns the tuple
   (_dict_cudaError_t[err], pNodeParams) — or (..., None) on a non-success
   error code. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_388cudaGraphMemcpyNodeGetParams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node) { cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pnode = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *__pyx_v_pNodeParams = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphMemcpyNodeGetParams", 0); /* "cuda/bindings/runtime.pyx":26054 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_node == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26055 * cdef cyruntime.cudaGraphNode_t cynode * if node is None: * pnode = 0 # <<<<<<<<<<<<<< * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pnode = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26054 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26056 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ __pyx_t_2 =
__Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26057 * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) # <<<<<<<<<<<<<< * else: * pnode = int(cudaGraphNode_t(node)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26057, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pnode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26056 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26059 * pnode = int(node) * else: * pnode = int(cudaGraphNode_t(node)) # <<<<<<<<<<<<<< * cynode = pnode * cdef cudaMemcpy3DParms pNodeParams = cudaMemcpy3DParms() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26059, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26059, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pnode =
((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":26060 * else: * pnode = int(cudaGraphNode_t(node)) * cynode = pnode # <<<<<<<<<<<<<< * cdef cudaMemcpy3DParms pNodeParams = cudaMemcpy3DParms() * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26060, __pyx_L1_error) __pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26061 * pnode = int(cudaGraphNode_t(node)) * cynode = pnode * cdef cudaMemcpy3DParms pNodeParams = cudaMemcpy3DParms() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphMemcpyNodeGetParams(cynode, pNodeParams._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpy3DParms); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpy3DParms); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26061, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26062 * cynode = pnode * cdef cudaMemcpy3DParms pNodeParams = cudaMemcpy3DParms() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphMemcpyNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26063 * cdef cudaMemcpy3DParms pNodeParams = cudaMemcpy3DParms() * with nogil: * err =
cyruntime.cudaGraphMemcpyNodeGetParams(cynode, pNodeParams._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphMemcpyNodeGetParams(__pyx_v_cynode, ((struct cudaMemcpy3DParms *)__pyx_v_pNodeParams->_pvt_ptr)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26063, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":26062 * cynode = pnode * cdef cudaMemcpy3DParms pNodeParams = cudaMemcpy3DParms() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphMemcpyNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":26064 * with nogil: * err = cyruntime.cudaGraphMemcpyNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26065 * err = cyruntime.cudaGraphMemcpyNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pNodeParams) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26065, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26065, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26065, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26065, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 26065, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 26065, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26064 * with nogil: * err = cyruntime.cudaGraphMemcpyNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) */ } /* "cuda/bindings/runtime.pyx":26066 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26066, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26066, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pNodeParams); __Pyx_GIVEREF((PyObject *)__pyx_v_pNodeParams); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject
*)__pyx_v_pNodeParams)) != (0)) __PYX_ERR(0, 26066, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26031 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphMemcpyNodeGetParams(node): * """ Returns a memcpy node's parameters. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemcpyNodeGetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XDECREF((PyObject *)__pyx_v_pNodeParams); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26068 * return (_dict_cudaError_t[err], pNodeParams) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphMemcpyNodeSetParams(node, pNodeParams : Optional[cudaMemcpy3DParms]): * """ Sets a memcpy node's parameters.
*/
/* ------------------------------------------------------------------ *
 * NOTE(review): machine-generated by Cython from
 * cuda/bindings/runtime.pyx -- do not hand-edit; regenerate instead.
 * Comments below were added for readability only; all code tokens are
 * unchanged.
 * ------------------------------------------------------------------ */

/* Python wrapper for runtime.pyx:26068
 *   def cudaGraphMemcpyNodeSetParams(node, pNodeParams: Optional[cudaMemcpy3DParms])
 * Declares the entry point (METH_FASTCALL|METH_KEYWORDS), its docstring and
 * PyMethodDef slot; parses the two arguments, type-checks pNodeParams
 * against cudaMemcpy3DParms (None allowed), then delegates to the
 * __pyx_pf_..._390... implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_391cudaGraphMemcpyNodeSetParams(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_390cudaGraphMemcpyNodeSetParams, "cudaGraphMemcpyNodeSetParams(node, cudaMemcpy3DParms pNodeParams: Optional[cudaMemcpy3DParms])\n\nSets a memcpy node's parameters.\n\nSets the parameters of memcpy node `node` to `pNodeParams`.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to set the parameters for\npNodeParams : :py:obj:`~.cudaMemcpy3DParms`\n Parameters to copy\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`,\n\nSee Also\n--------\n:py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaMemcpy3D`, :py:obj:`~.cudaGraphMemcpyNodeSetParamsToSymbol`, :py:obj:`~.cudaGraphMemcpyNodeSetParamsFromSymbol`, :py:obj:`~.cudaGraphMemcpyNodeSetParams1D`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeGetParams`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_391cudaGraphMemcpyNodeSetParams = {"cudaGraphMemcpyNodeSetParams", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_391cudaGraphMemcpyNodeSetParams, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_390cudaGraphMemcpyNodeSetParams};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_391cudaGraphMemcpyNodeSetParams(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
PyObject *__pyx_v_node = 0;
struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *__pyx_v_pNodeParams = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[2] = {0,0};
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cudaGraphMemcpyNodeSetParams (wrapper)", 0);
/* Count the positional arguments (only needed for the tuple-based calling
 * convention; METH_FASTCALL passes the count directly). */
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* Collect the two arguments (positional and/or keyword) into values[]. */
{
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,&__pyx_mstate_global->__pyx_n_u_pNodeParams,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26068, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
switch (__pyx_nargs) {
case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26068, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26068, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphMemcpyNodeSetParams", 0) < (0)) __PYX_ERR(0, 26068, __pyx_L3_error)
for (Py_ssize_t i = __pyx_nargs; i < 2; i++) {
if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphMemcpyNodeSetParams", 1, 2, 2, i); __PYX_ERR(0, 26068, __pyx_L3_error) }
}
} else if (unlikely(__pyx_nargs != 2)) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26068, __pyx_L3_error)
values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26068, __pyx_L3_error)
}
__pyx_v_node = values[0];
__pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *)values[1]);
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaGraphMemcpyNodeSetParams", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 26068, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
/* Argument-parsing failure: drop any collected references and raise. */
__pyx_L3_error:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* pNodeParams must be a cudaMemcpy3DParms instance or None (the `1`
 * argument to ArgTypeTest permits None). */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pNodeParams), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemcpy3DParms, 1, "pNodeParams", 0))) __PYX_ERR(0, 26069, __pyx_L1_error)
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_390cudaGraphMemcpyNodeSetParams(__pyx_self, __pyx_v_node, __pyx_v_pNodeParams);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
goto __pyx_L7_cleaned_up;
__pyx_L0:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__pyx_L7_cleaned_up:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of cudaGraphMemcpyNodeSetParams (runtime.pyx:26068).
 * Coerces `node` to an integer graph-node handle, takes the C struct
 * pointer held by the optional pNodeParams wrapper (NULL when None),
 * releases the GIL around the cyruntime call, and returns the 1-tuple
 * (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_390cudaGraphMemcpyNodeSetParams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemcpy3DParms *__pyx_v_pNodeParams) {
cudaGraphNode_t __pyx_v_cynode;
PyObject *__pyx_v_pnode = NULL;
struct cudaMemcpy3DParms *__pyx_v_cypNodeParams_ptr;
cudaError_t __pyx_v_err;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
size_t __pyx_t_6;
__pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7;
struct cudaMemcpy3DParms *__pyx_t_8;
cudaError_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cudaGraphMemcpyNodeSetParams", 0);
/* runtime.pyx:26091-26092  `if node is None: pnode = 0` */
__pyx_t_1 = (__pyx_v_node == Py_None);
if (__pyx_t_1) {
__Pyx_INCREF(__pyx_mstate_global->__pyx_int_0);
__pyx_v_pnode = __pyx_mstate_global->__pyx_int_0;
goto __pyx_L3;
}
/* runtime.pyx:26093-26094  `elif isinstance(node, (cudaGraphNode_t, driver.CUgraphNode)): pnode = int(node)` */
__pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
__pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26094, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_pnode = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L3;
}
/* runtime.pyx:26096  `else: pnode = int(cudaGraphNode_t(node))` */
/*else*/ {
__pyx_t_4 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t);
__pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t);
__pyx_t_6 = 1;
{
PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node};
__pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26096, __pyx_L1_error)
__Pyx_GOTREF((PyObject *)__pyx_t_3);
}
__pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26096, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_pnode = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
}
__pyx_L3:;
/* runtime.pyx:26097  `cynode = pnode` -- extract the raw handle value */
__pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26097, __pyx_L1_error)
__pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7));
/* runtime.pyx:26098  cypNodeParams_ptr = pNodeParams._pvt_ptr, or NULL when None */
__pyx_t_1 = (((PyObject *)__pyx_v_pNodeParams) != Py_None);
if (__pyx_t_1) {
__pyx_t_8 = __pyx_v_pNodeParams->_pvt_ptr;
} else {
__pyx_t_8 = NULL;
}
__pyx_v_cypNodeParams_ptr = __pyx_t_8;
/* runtime.pyx:26099-26100  `with nogil:` -- release the GIL around the runtime call */
{
PyThreadState *_save;
_save = NULL;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
/*try:*/ {
__pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphMemcpyNodeSetParams(__pyx_v_cynode, __pyx_v_cypNodeParams_ptr); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26100, __pyx_L7_error)
__pyx_v_err = __pyx_t_9;
}
/*finally:*/ {
/*normal exit:*/{
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L8;
}
__pyx_L7_error: {
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L1_error;
}
__pyx_L8:;
}
}
/* runtime.pyx:26101  `return (_dict_cudaError_t[err],)` */
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 26101, __pyx_L1_error);
__pyx_t_4 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* function exit code: on error, drop temporaries and record a traceback. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_pnode);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cuda/bindings/runtime.pyx":26103 -- banner for cudaGraphMemcpyNodeSetParams1D;
 * intentionally left open here: it is closed by the leading `*` `/` on the
 * following original line.
*/
/* Python wrapper for runtime.pyx:26103
 *   def cudaGraphMemcpyNodeSetParams1D(node, dst, src, size_t count, kind not None: cudaMemcpyKind)
 * Declares the entry point, docstring and PyMethodDef slot; parses the
 * five arguments (converting `count` to size_t and rejecting None for
 * `kind`), then delegates to __pyx_pf_..._392... below.
 * NOTE(review): Cython-generated -- do not hand-edit; comments only. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_393cudaGraphMemcpyNodeSetParams1D(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_392cudaGraphMemcpyNodeSetParams1D, "cudaGraphMemcpyNodeSetParams1D(node, dst, src, size_t count, kind: cudaMemcpyKind)\n\nSets a memcpy node's parameters to perform a 1-dimensional copy.\n\nSets the parameters of memcpy node `node` to the copy described by the\nprovided parameters.\n\nWhen the graph is launched, the node will copy `count` bytes from the\nmemory area pointed to by `src` to the memory area pointed to by `dst`,\nwhere `kind` specifies the direction of the copy, and must be one of\n:py:obj:`~.cudaMemcpyHostToHost`, :py:obj:`~.cudaMemcpyHostToDevice`,\n:py:obj:`~.cudaMemcpyDeviceToHost`,\n:py:obj:`~.cudaMemcpyDeviceToDevice`, or :py:obj:`~.cudaMemcpyDefault`.\nPassing :py:obj:`~.cudaMemcpyDefault` is recommended, in which case the\ntype of transfer is inferred from the pointer values. However,\n:py:obj:`~.cudaMemcpyDefault` is only allowed on systems that support\nunified virtual addressing. Launching a memcpy node with dst and src\npointers that do not match the direction of the copy results in an\nundefined behavior.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to set the parameters for\ndst : Any\n Destination memory address\nsrc : Any\n Source memory address\ncount : size_t\n Size in bytes to copy\nkind : :py:obj:`~.cudaMemcpyKind`\n Type of transfer\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaMemcpy`, :py:obj:`~.cudaGraphMemcpyNodeSetParams`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphMemcpyNodeGetParams`");
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_393cudaGraphMemcpyNodeSetParams1D = {"cudaGraphMemcpyNodeSetParams1D", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_393cudaGraphMemcpyNodeSetParams1D, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_392cudaGraphMemcpyNodeSetParams1D};
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_393cudaGraphMemcpyNodeSetParams1D(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
PyObject *__pyx_v_node = 0;
PyObject *__pyx_v_dst = 0;
PyObject *__pyx_v_src = 0;
size_t __pyx_v_count;
PyObject *__pyx_v_kind = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
PyObject* values[5] = {0,0,0,0,0};
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("cudaGraphMemcpyNodeSetParams1D (wrapper)", 0);
/* Count the positional arguments (tuple-based calling convention only). */
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* Collect the five arguments (positional and/or keyword) into values[]. */
{
PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,&__pyx_mstate_global->__pyx_n_u_dst_2,&__pyx_mstate_global->__pyx_n_u_src_2,&__pyx_mstate_global->__pyx_n_u_count,&__pyx_mstate_global->__pyx_n_u_kind_2,0};
const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0;
if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26103, __pyx_L3_error)
if (__pyx_kwds_len > 0) {
switch (__pyx_nargs) {
case 5: values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 26103, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26103, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26103, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26103, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26103, __pyx_L3_error)
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
const Py_ssize_t kwd_pos_args = __pyx_nargs;
if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphMemcpyNodeSetParams1D", 0) < (0)) __PYX_ERR(0, 26103, __pyx_L3_error)
for (Py_ssize_t i = __pyx_nargs; i < 5; i++) {
if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphMemcpyNodeSetParams1D", 1, 5, 5, i); __PYX_ERR(0, 26103, __pyx_L3_error) }
}
} else if (unlikely(__pyx_nargs != 5)) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26103, __pyx_L3_error)
values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26103, __pyx_L3_error)
values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26103, __pyx_L3_error)
values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26103, __pyx_L3_error)
values[4] = __Pyx_ArgRef_FASTCALL(__pyx_args, 4);
if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[4])) __PYX_ERR(0, 26103, __pyx_L3_error)
}
__pyx_v_node = values[0];
__pyx_v_dst = values[1];
__pyx_v_src = values[2];
/* `count` is declared `size_t` in the .pyx signature -- convert eagerly. */
__pyx_v_count = __Pyx_PyLong_As_size_t(values[3]); if (unlikely((__pyx_v_count == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 26104, __pyx_L3_error)
__pyx_v_kind = values[4];
}
goto __pyx_L6_skip;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaGraphMemcpyNodeSetParams1D", 1, 5, 5, __pyx_nargs); __PYX_ERR(0, 26103, __pyx_L3_error)
__pyx_L6_skip:;
goto __pyx_L4_argument_unpacking_done;
/* Argument-parsing failure: drop any collected references and raise. */
__pyx_L3_error:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams1D", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
/* `kind not None` in the .pyx signature: reject None explicitly. */
if (unlikely(((PyObject *)__pyx_v_kind) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "kind"); __PYX_ERR(0, 26104, __pyx_L1_error)
}
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_392cudaGraphMemcpyNodeSetParams1D(__pyx_self, __pyx_v_node, __pyx_v_dst, __pyx_v_src, __pyx_v_count, __pyx_v_kind);
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
goto __pyx_L7_cleaned_up;
__pyx_L0:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
Py_XDECREF(values[__pyx_temp]);
}
__pyx_L7_cleaned_up:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of cudaGraphMemcpyNodeSetParams1D (runtime.pyx:26103).
 * Coerces `node` to an integer handle, wraps `dst`/`src` in
 * _HelperInputVoidPtr to obtain raw void* addresses, extracts the enum
 * value from `kind`, releases the GIL around the cyruntime call, and
 * returns the 1-tuple (_dict_cudaError_t[err],).  (The error/exit labels
 * for this function follow on the next original line, which is untouched.) */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_392cudaGraphMemcpyNodeSetParams1D(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node, PyObject *__pyx_v_dst, PyObject *__pyx_v_src, size_t __pyx_v_count, PyObject *__pyx_v_kind) {
cudaGraphNode_t __pyx_v_cynode;
PyObject *__pyx_v_pnode = NULL;
struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cydst = NULL;
void *__pyx_v_cydst_ptr;
struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *__pyx_v_cysrc = NULL;
void *__pyx_v_cysrc_ptr;
enum cudaMemcpyKind __pyx_v_cykind;
cudaError_t __pyx_v_err;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
size_t __pyx_t_6;
__pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7;
enum cudaMemcpyKind __pyx_t_8;
cudaError_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("cudaGraphMemcpyNodeSetParams1D", 0);
/* runtime.pyx:26146-26147  `if node is None: pnode = 0` */
__pyx_t_1 = (__pyx_v_node == Py_None);
if (__pyx_t_1) {
__Pyx_INCREF(__pyx_mstate_global->__pyx_int_0);
__pyx_v_pnode = __pyx_mstate_global->__pyx_int_0;
goto __pyx_L3;
}
/* runtime.pyx:26148-26149  `elif isinstance(node, (cudaGraphNode_t, driver.CUgraphNode)): pnode = int(node)` */
__pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
__pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26149, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_pnode = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L3;
}
/* runtime.pyx:26151  `else: pnode = int(cudaGraphNode_t(node))` */
/*else*/ {
__pyx_t_4 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t);
__pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t);
__pyx_t_6 = 1;
{
PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node};
__pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26151, __pyx_L1_error)
__Pyx_GOTREF((PyObject *)__pyx_t_3);
}
__pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_pnode = ((PyObject*)__pyx_t_5);
__pyx_t_5 = 0;
}
__pyx_L3:;
/* runtime.pyx:26152  `cynode = pnode` -- extract the raw handle value */
__pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26152, __pyx_L1_error)
__pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7));
/* runtime.pyx:26153  `cydst = _HelperInputVoidPtr(dst)` */
__pyx_t_3 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr);
__pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr);
__pyx_t_6 = 1;
{
PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dst};
__pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26153, __pyx_L1_error)
__Pyx_GOTREF((PyObject *)__pyx_t_5);
}
__pyx_v_cydst = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5);
__pyx_t_5 = 0;
/* runtime.pyx:26154  `cydst_ptr = cydst.cptr` -- raw destination address */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cydst), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26154, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26154, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_cydst_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7));
/* runtime.pyx:26155  `cysrc = _HelperInputVoidPtr(src)` */
__pyx_t_4 = NULL;
__Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr);
__pyx_t_3 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime__HelperInputVoidPtr);
__pyx_t_6 = 1;
{
PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_src};
__pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET));
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26155, __pyx_L1_error)
__Pyx_GOTREF((PyObject *)__pyx_t_5);
}
__pyx_v_cysrc = ((struct __pyx_obj_4cuda_8bindings_7runtime__HelperInputVoidPtr *)__pyx_t_5);
__pyx_t_5 = 0;
/* runtime.pyx:26156  `cysrc_ptr = cysrc.cptr` -- raw source address */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_cysrc), __pyx_mstate_global->__pyx_n_u_cptr); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26156, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_t_5); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26156, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_cysrc_ptr = ((void *)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7));
/* runtime.pyx:26157  `cykind = kind.value` -- enum value of the transfer kind */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_kind, __pyx_mstate_global->__pyx_n_u_value); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26157, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_8 = ((enum cudaMemcpyKind)__Pyx_PyLong_As_enum__cudaMemcpyKind(__pyx_t_5)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 26157, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_cykind = __pyx_t_8;
/* runtime.pyx:26158-26159  `with nogil:` -- release the GIL around the runtime call */
{
PyThreadState *_save;
_save = NULL;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
/*try:*/ {
__pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphMemcpyNodeSetParams1D(__pyx_v_cynode, __pyx_v_cydst_ptr, __pyx_v_cysrc_ptr, __pyx_v_count, __pyx_v_cykind); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26159, __pyx_L7_error)
__pyx_v_err = __pyx_t_9;
}
/*finally:*/ {
/*normal exit:*/{
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L8;
}
__pyx_L7_error: {
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
goto __pyx_L1_error;
}
__pyx_L8:;
}
}
/* runtime.pyx:26160  `return (_dict_cudaError_t[err],)` */
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26160, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 26160, __pyx_L1_error);
__pyx_t_4 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":26103 -- banner comment; intentionally left
 * open here: it is closed by the leading `*` `/` on the following original
 * line, which also carries this function's __pyx_L1_error/__pyx_L0 exit code.
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemcpyNodeSetParams1D", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XDECREF((PyObject *)__pyx_v_cydst); __Pyx_XDECREF((PyObject *)__pyx_v_cysrc); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26162 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddMemsetNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pMemsetParams : Optional[cudaMemsetParams]): * """ Creates a memset node and adds it to a graph. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_395cudaGraphAddMemsetNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_394cudaGraphAddMemsetNode, "cudaGraphAddMemsetNode(graph, pDependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, cudaMemsetParams pMemsetParams: Optional[cudaMemsetParams])\n\nCreates a memset node and adds it to a graph.\n\nCreates a new memset node and adds it to `graph` with `numDependencies`\ndependencies specified via `pDependencies`. It is possible for\n`numDependencies` to be 0, in which case the node will be placed at the\nroot of the graph. `pDependencies` may not have any duplicate entries.\nA handle to the new node will be returned in `pGraphNode`.\n\nThe element size must be 1, 2, or 4 bytes. 
When the graph is launched,\nthe node will perform the memset described by `pMemsetParams`.\n\nParameters\n----------\ngraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n Graph to which to add the node\npDependencies : list[:py:obj:`~.cudaGraphNode_t`]\n Dependencies of the node\nnumDependencies : size_t\n Number of dependencies\npMemsetParams : :py:obj:`~.cudaMemsetParams`\n Parameters for the memory set\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`, :py:obj:`~.cudaErrorInvalidDevice`\npGraphNode : :py:obj:`~.cudaGraphNode_t`\n Returns newly created node\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaGraphMemsetNodeGetParams`, :py:obj:`~.cudaGraphMemsetNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_395cudaGraphAddMemsetNode = {"cudaGraphAddMemsetNode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_395cudaGraphAddMemsetNode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_394cudaGraphAddMemsetNode}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_395cudaGraphAddMemsetNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_graph = 0; PyObject *__pyx_v_pDependencies = 0; size_t __pyx_v_numDependencies; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemsetParams *__pyx_v_pMemsetParams = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphAddMemsetNode (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_graph_2,&__pyx_mstate_global->__pyx_n_u_pDependencies,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_pMemsetParams,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26162, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26162, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26162, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26162, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26162, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphAddMemsetNode", 0) < (0)) __PYX_ERR(0, 26162, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphAddMemsetNode", 1, 4, 4, i); __PYX_ERR(0, 26162, 
__pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26162, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26162, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26162, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26162, __pyx_L3_error) } __pyx_v_graph = values[0]; __pyx_v_pDependencies = values[1]; __pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 26163, __pyx_L3_error) __pyx_v_pMemsetParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemsetParams *)values[3]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphAddMemsetNode", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 26162, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddMemsetNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pMemsetParams), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemsetParams, 1, "pMemsetParams", 0))) __PYX_ERR(0, 26163, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_394cudaGraphAddMemsetNode(__pyx_self, __pyx_v_graph, __pyx_v_pDependencies, __pyx_v_numDependencies, __pyx_v_pMemsetParams); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; 
__pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemsetNode_2generator97(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":26198 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_22cudaGraphAddMemsetNode_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_97_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_97_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_97_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_97_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_97_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 26198, __pyx_L1_error) } else { 
__Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemsetNode_2generator97, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[97]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaGraphAddMemsetNode_locals_ge, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 26198, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddMemsetNode.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemsetNode_2generator97(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_97_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_97_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if 
(unlikely(!__pyx_sent_value)) __PYX_ERR(0, 26198, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 26198, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26198, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26198, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26198, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26198, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 26198, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); 
__Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L7_bool_binop_done; } __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_5 = __pyx_t_6; __pyx_L7_bool_binop_done:; __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26162 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddMemsetNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pMemsetParams : Optional[cudaMemsetParams]): * """ Creates a memset node and adds it to a graph. 
*/ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_394cudaGraphAddMemsetNode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_graph, PyObject *__pyx_v_pDependencies, size_t __pyx_v_numDependencies, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemsetParams *__pyx_v_pMemsetParams) { cudaGraph_t __pyx_v_cygraph; PyObject *__pyx_v_pgraph = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *__pyx_v_pGraphNode = 0; cudaGraphNode_t *__pyx_v_cypDependencies; Py_ssize_t __pyx_v_idx; struct cudaMemsetParams *__pyx_v_cypMemsetParams_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemsetNode_2generator97 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaMemsetParams *__pyx_t_14; cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphAddMemsetNode", 0); __Pyx_INCREF(__pyx_v_pDependencies); /* "cuda/bindings/runtime.pyx":26197 * :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaGraphMemsetNodeGetParams`, :py:obj:`~.cudaGraphMemsetNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode` * """ * pDependencies = [] if pDependencies is None else pDependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected 
tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_2 = (__pyx_v_pDependencies == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26197, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_pDependencies); __pyx_t_1 = __pyx_v_pDependencies; } __Pyx_DECREF_SET(__pyx_v_pDependencies, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26198 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_22cudaGraphAddMemsetNode_genexpr(NULL, __pyx_v_pDependencies); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26198, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 26198, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":26199 * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraph_t cygraph * if graph is None: */ __pyx_t_1 = NULL; 
__Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_pDependencies_is_not_in}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26199, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 26199, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26198 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ } /* "cuda/bindings/runtime.pyx":26201 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_4 = (__pyx_v_graph == Py_None); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26202 * cdef cyruntime.cudaGraph_t cygraph * if graph is None: * pgraph = 0 # <<<<<<<<<<<<<< * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pgraph = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26201 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or 
list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":26203 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_4 = __pyx_t_2; __pyx_L5_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26204 * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) # <<<<<<<<<<<<<< * else: * pgraph = int(cudaGraph_t(graph)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_graph); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26204, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pgraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26203 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":26206 * pgraph = int(graph) * else: * pgraph = int(cudaGraph_t(graph)) # <<<<<<<<<<<<<< * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_graph}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); 
__pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26206, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_1 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pgraph = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":26207 * else: * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph # <<<<<<<<<<<<<< * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pgraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26207, __pyx_L1_error) __pyx_v_cygraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26208 * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26208, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pGraphNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26209 * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * 
cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL # <<<<<<<<<<<<<< * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cypDependencies = NULL; /* "cuda/bindings/runtime.pyx":26210 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26210, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26211 * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26211, __pyx_L1_error) __pyx_v_cypDependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":26212 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_4 = (__pyx_v_cypDependencies == NULL); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":26213 * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + 
str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(pDependencies)): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26213, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26213, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 26213, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26212 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":26215 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(pDependencies)): # <<<<<<<<<<<<<< * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26215, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":26216 * else: * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26216, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (__pyx_v_cypDependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr[0])); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } /* "cuda/bindings/runtime.pyx":26210 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * 
cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":26217 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26217, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 == 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26218 * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaMemsetParams* cypMemsetParams_ptr = pMemsetParams._pvt_ptr if pMemsetParams is not None else NULL */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cypDependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26217 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ } __pyx_L7:; /* "cuda/bindings/runtime.pyx":26219 * elif len(pDependencies) == 1: * cypDependencies = 
(pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemsetParams* cypMemsetParams_ptr = pMemsetParams._pvt_ptr if pMemsetParams is not None else NULL * with nogil: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26219, __pyx_L1_error) __pyx_t_4 = (__pyx_v_numDependencies > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_4)) { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_9 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26219, __pyx_L1_error) __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26219, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26219, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 26219, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":26220 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaMemsetParams* cypMemsetParams_ptr = pMemsetParams._pvt_ptr if pMemsetParams is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphAddMemsetNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypMemsetParams_ptr) */ __pyx_t_4 = (((PyObject *)__pyx_v_pMemsetParams) != Py_None); if (__pyx_t_4) { __pyx_t_14 = __pyx_v_pMemsetParams->_pvt_ptr; } else { __pyx_t_14 = NULL; } __pyx_v_cypMemsetParams_ptr = __pyx_t_14; /* "cuda/bindings/runtime.pyx":26221 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaMemsetParams* cypMemsetParams_ptr = pMemsetParams._pvt_ptr if pMemsetParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddMemsetNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypMemsetParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26222 * 
cdef cyruntime.cudaMemsetParams* cypMemsetParams_ptr = pMemsetParams._pvt_ptr if pMemsetParams is not None else NULL * with nogil: * err = cyruntime.cudaGraphAddMemsetNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypMemsetParams_ptr) # <<<<<<<<<<<<<< * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphAddMemsetNode(((cudaGraphNode_t *)__pyx_v_pGraphNode->__pyx_base._pvt_ptr), __pyx_v_cygraph, __pyx_v_cypDependencies, __pyx_v_numDependencies, __pyx_v_cypMemsetParams_ptr); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26222, __pyx_L13_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":26221 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaMemsetParams* cypMemsetParams_ptr = pMemsetParams._pvt_ptr if pMemsetParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddMemsetNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypMemsetParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L14; } __pyx_L13_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L14:; } } /* "cuda/bindings/runtime.pyx":26223 * with nogil: * err = cyruntime.cudaGraphAddMemsetNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypMemsetParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26223, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_4 = 
__pyx_t_2; goto __pyx_L16_bool_binop_done; } __pyx_t_2 = (__pyx_v_cypDependencies != NULL); __pyx_t_4 = __pyx_t_2; __pyx_L16_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26224 * err = cyruntime.cudaGraphAddMemsetNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypMemsetParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cypDependencies); /* "cuda/bindings/runtime.pyx":26223 * with nogil: * err = cyruntime.cudaGraphAddMemsetNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypMemsetParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":26225 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26226 * free(cypDependencies) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraphNode) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(0, 26226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26226, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None) != (0)) __PYX_ERR(0, 26226, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26225 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ } /* "cuda/bindings/runtime.pyx":26227 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26227, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 26227, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraphNode); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraphNode); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_pGraphNode)) != (0)) __PYX_ERR(0, 26227, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* 
"cuda/bindings/runtime.pyx":26162 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddMemsetNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pMemsetParams : Optional[cudaMemsetParams]): * """ Creates a memset node and adds it to a graph. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddMemsetNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pgraph); __Pyx_XDECREF((PyObject *)__pyx_v_pGraphNode); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_22cudaGraphAddMemsetNode_2generator97); __Pyx_XDECREF(__pyx_v_pDependencies); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26229 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphMemsetNodeGetParams(node): * """ Returns a memset node's parameters. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_397cudaGraphMemsetNodeGetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_396cudaGraphMemsetNodeGetParams, "cudaGraphMemsetNodeGetParams(node)\n\nReturns a memset node's parameters.\n\nReturns the parameters of memset node `node` in `pNodeParams`.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to get the parameters for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npNodeParams : :py:obj:`~.cudaMemsetParams`\n Pointer to return the parameters\n\nSee Also\n--------\n:py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphMemsetNodeSetParams`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_397cudaGraphMemsetNodeGetParams = {"cudaGraphMemsetNodeGetParams", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_397cudaGraphMemsetNodeGetParams, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_396cudaGraphMemsetNodeGetParams}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_397cudaGraphMemsetNodeGetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_node = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphMemsetNodeGetParams (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE 
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26229, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26229, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphMemsetNodeGetParams", 0) < (0)) __PYX_ERR(0, 26229, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphMemsetNodeGetParams", 1, 1, 1, i); __PYX_ERR(0, 26229, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26229, __pyx_L3_error) } __pyx_v_node = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphMemsetNodeGetParams", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 26229, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemsetNodeGetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_396cudaGraphMemsetNodeGetParams(__pyx_self, __pyx_v_node); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_396cudaGraphMemsetNodeGetParams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node) { cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pnode = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemsetParams *__pyx_v_pNodeParams = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphMemsetNodeGetParams", 0); /* "cuda/bindings/runtime.pyx":26252 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_node == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26253 * cdef cyruntime.cudaGraphNode_t cynode * if node is None: * pnode = 0 # <<<<<<<<<<<<<< * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pnode = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26252 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26254 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ __pyx_t_2 = 
__Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26255 * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) # <<<<<<<<<<<<<< * else: * pnode = int(cudaGraphNode_t(node)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pnode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26254 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26257 * pnode = int(node) * else: * pnode = int(cudaGraphNode_t(node)) # <<<<<<<<<<<<<< * cynode = pnode * cdef cudaMemsetParams pNodeParams = cudaMemsetParams() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26257, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26257, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pnode = 
((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":26258 * else: * pnode = int(cudaGraphNode_t(node)) * cynode = pnode # <<<<<<<<<<<<<< * cdef cudaMemsetParams pNodeParams = cudaMemsetParams() * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26258, __pyx_L1_error) __pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26259 * pnode = int(cudaGraphNode_t(node)) * cynode = pnode * cdef cudaMemsetParams pNodeParams = cudaMemsetParams() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphMemsetNodeGetParams(cynode, pNodeParams._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemsetParams); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemsetParams); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26259, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemsetParams *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26260 * cynode = pnode * cdef cudaMemsetParams pNodeParams = cudaMemsetParams() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphMemsetNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26261 * cdef cudaMemsetParams pNodeParams = cudaMemsetParams() * with nogil: * err = 
cyruntime.cudaGraphMemsetNodeGetParams(cynode, pNodeParams._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphMemsetNodeGetParams(__pyx_v_cynode, ((struct cudaMemsetParams *)__pyx_v_pNodeParams->_pvt_ptr)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26261, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":26260 * cynode = pnode * cdef cudaMemsetParams pNodeParams = cudaMemsetParams() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphMemsetNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":26262 * with nogil: * err = cyruntime.cudaGraphMemsetNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26263 * err = cyruntime.cudaGraphMemsetNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pNodeParams) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26263, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 26263, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 26263, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26262 * with nogil: * err = cyruntime.cudaGraphMemsetNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) */ } /* "cuda/bindings/runtime.pyx":26264 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26264, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26264, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pNodeParams); __Pyx_GIVEREF((PyObject *)__pyx_v_pNodeParams); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject 
*)__pyx_v_pNodeParams)) != (0)) __PYX_ERR(0, 26264, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26229 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphMemsetNodeGetParams(node): * """ Returns a memset node's parameters. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemsetNodeGetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XDECREF((PyObject *)__pyx_v_pNodeParams); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26266 * return (_dict_cudaError_t[err], pNodeParams) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphMemsetNodeSetParams(node, pNodeParams : Optional[cudaMemsetParams]): * """ Sets a memset node's parameters. 
*/
/* Python wrapper */
/* NOTE(review): machine-generated by Cython from cuda/bindings/runtime.pyx
 * (around pyx line 26266) -- do not hand-edit; regenerate from the .pyx source.
 * This pass only adds comments and restores line breaks around preprocessor
 * directives that an earlier whitespace-mangling collapsed; tokens unchanged. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_399cudaGraphMemsetNodeSetParams(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_398cudaGraphMemsetNodeSetParams, "cudaGraphMemsetNodeSetParams(node, cudaMemsetParams pNodeParams: Optional[cudaMemsetParams])\n\nSets a memset node's parameters.\n\nSets the parameters of memset node `node` to `pNodeParams`.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to set the parameters for\npNodeParams : :py:obj:`~.cudaMemsetParams`\n Parameters to copy\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaMemset2D`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphMemsetNodeGetParams`");
/* Method-table entry binding the Python-visible name to the fastcall wrapper. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_399cudaGraphMemsetNodeSetParams = {"cudaGraphMemsetNodeSetParams", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_399cudaGraphMemsetNodeSetParams, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_398cudaGraphMemsetNodeSetParams};
/* Wrapper: unpacks exactly two arguments (node, pNodeParams) from positional
 * and/or keyword form, type-checks pNodeParams against the cudaMemsetParams
 * extension type (None accepted, see the `1` "none_allowed" flag in
 * __Pyx_ArgTypeTest), then forwards to the implementation below. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_399cudaGraphMemsetNodeSetParams(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_node = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemsetParams *__pyx_v_pNodeParams = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphMemsetNodeSetParams (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,&__pyx_mstate_global->__pyx_n_u_pNodeParams,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26266, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26266, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26266, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphMemsetNodeSetParams", 0) < (0)) __PYX_ERR(0, 26266, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphMemsetNodeSetParams", 1, 2, 2, i); __PYX_ERR(0, 26266, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26266, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26266, __pyx_L3_error) } __pyx_v_node = values[0]; __pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemsetParams *)values[1]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphMemsetNodeSetParams", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 26266, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemsetNodeSetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pNodeParams), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaMemsetParams, 1, "pNodeParams", 0))) __PYX_ERR(0, 26267, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_398cudaGraphMemsetNodeSetParams(__pyx_self, __pyx_v_node, __pyx_v_pNodeParams); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation: coerces `node` to a raw cudaGraphNode_t handle
 * (None -> 0; cudaGraphNode_t / driver.CUgraphNode -> int(node); anything
 * else via int(cudaGraphNode_t(node))), takes pNodeParams._pvt_ptr (NULL when
 * pNodeParams is None), calls cyruntime.cudaGraphMemsetNodeSetParams with the
 * GIL released, and returns the 1-tuple (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_398cudaGraphMemsetNodeSetParams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node, struct __pyx_obj_4cuda_8bindings_7runtime_cudaMemsetParams *__pyx_v_pNodeParams) { cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pnode = NULL; struct cudaMemsetParams *__pyx_v_cypNodeParams_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; struct cudaMemsetParams *__pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphMemsetNodeSetParams", 0);
/* "cuda/bindings/runtime.pyx":26289 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */
__pyx_t_1 = (__pyx_v_node == Py_None); if (__pyx_t_1) {
/* "cuda/bindings/runtime.pyx":26290 * cdef cyruntime.cudaGraphNode_t cynode * if node is None: * pnode = 0 # <<<<<<<<<<<<<< * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) */
__Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pnode = __pyx_mstate_global->__pyx_int_0;
/* "cuda/bindings/runtime.pyx":26289 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */
goto __pyx_L3; }
/* "cuda/bindings/runtime.pyx":26291 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */
__pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) {
/* "cuda/bindings/runtime.pyx":26292 * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) # <<<<<<<<<<<<<< * else: * pnode = int(cudaGraphNode_t(node)) */
__pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pnode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0;
/* "cuda/bindings/runtime.pyx":26291 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */
goto __pyx_L3; }
/* "cuda/bindings/runtime.pyx":26294 * pnode = int(node) * else: * pnode = int(cudaGraphNode_t(node)) # <<<<<<<<<<<<<< * cynode = pnode * cdef cyruntime.cudaMemsetParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL */
/*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26294, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26294, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pnode = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:;
/* "cuda/bindings/runtime.pyx":26295 * else: * pnode = int(cudaGraphNode_t(node)) * cynode = pnode # <<<<<<<<<<<<<< * cdef cyruntime.cudaMemsetParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: */
__pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26295, __pyx_L1_error) __pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7));
/* "cuda/bindings/runtime.pyx":26296 * pnode = int(cudaGraphNode_t(node)) * cynode = pnode * cdef cyruntime.cudaMemsetParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphMemsetNodeSetParams(cynode, cypNodeParams_ptr) */
__pyx_t_1 = (((PyObject *)__pyx_v_pNodeParams) != Py_None); if (__pyx_t_1) { __pyx_t_8 = __pyx_v_pNodeParams->_pvt_ptr; } else { __pyx_t_8 = NULL; } __pyx_v_cypNodeParams_ptr = __pyx_t_8;
/* "cuda/bindings/runtime.pyx":26297 * cynode = pnode * cdef cyruntime.cudaMemsetParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphMemsetNodeSetParams(cynode, cypNodeParams_ptr) * return (_dict_cudaError_t[err],) */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ {
/* "cuda/bindings/runtime.pyx":26298 * cdef cyruntime.cudaMemsetParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: * err = cyruntime.cudaGraphMemsetNodeSetParams(cynode, cypNodeParams_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */
__pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphMemsetNodeSetParams(__pyx_v_cynode, __pyx_v_cypNodeParams_ptr); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26298, __pyx_L7_error) __pyx_v_err = __pyx_t_9; }
/* "cuda/bindings/runtime.pyx":26297 * cynode = pnode * cdef cyruntime.cudaMemsetParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphMemsetNodeSetParams(cynode, cypNodeParams_ptr) * return (_dict_cudaError_t[err],) */
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } }
/* "cuda/bindings/runtime.pyx":26299 * with nogil: * err = cyruntime.cudaGraphMemsetNodeSetParams(cynode, cypNodeParams_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26299, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26299, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26299, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26299, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 26299, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":26266 * return (_dict_cudaError_t[err], pNodeParams) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphMemsetNodeSetParams(node, pNodeParams : Optional[cudaMemsetParams]): * """ Sets a memset node's parameters. */
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphMemsetNodeSetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":26301 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddHostNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pNodeParams : Optional[cudaHostNodeParams]): * """ Creates a host execution node and adds it to a graph. 
*/
/* Python wrapper */
/* NOTE(review): machine-generated by Cython from cuda/bindings/runtime.pyx
 * (around pyx line 26301) -- do not hand-edit; regenerate from the .pyx source.
 * This pass only adds comments and restores line breaks around preprocessor
 * directives that an earlier whitespace-mangling collapsed; tokens unchanged. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_401cudaGraphAddHostNode(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_400cudaGraphAddHostNode, "cudaGraphAddHostNode(graph, pDependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, cudaHostNodeParams pNodeParams: Optional[cudaHostNodeParams])\n\nCreates a host execution node and adds it to a graph.\n\nCreates a new CPU execution node and adds it to `graph` with\n`numDependencies` dependencies specified via `pDependencies` and\narguments specified in `pNodeParams`. It is possible for\n`numDependencies` to be 0, in which case the node will be placed at the\nroot of the graph. `pDependencies` may not have any duplicate entries.\nA handle to the new node will be returned in `pGraphNode`.\n\nWhen the graph is launched, the node will invoke the specified CPU\nfunction. Host nodes are not supported under MPS with pre-Volta GPUs.\n\nParameters\n----------\ngraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n Graph to which to add the node\npDependencies : list[:py:obj:`~.cudaGraphNode_t`]\n Dependencies of the node\nnumDependencies : size_t\n Number of dependencies\npNodeParams : :py:obj:`~.cudaHostNodeParams`\n Parameters for the host node\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorNotSupported`, :py:obj:`~.cudaErrorInvalidValue`\npGraphNode : :py:obj:`~.cudaGraphNode_t`\n Returns newly created node\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaLaunchHostFunc`, :py:obj:`~.cudaGraphHostNodeGetParams`, :py:obj:`~.cudaGraphHostNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode`");
/* Method-table entry binding the Python-visible name to the fastcall wrapper. */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_401cudaGraphAddHostNode = {"cudaGraphAddHostNode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_401cudaGraphAddHostNode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_400cudaGraphAddHostNode};
/* Wrapper: unpacks exactly four arguments (graph, pDependencies,
 * numDependencies, pNodeParams), converts numDependencies via
 * __Pyx_PyLong_As_size_t, type-checks pNodeParams against the
 * cudaHostNodeParams extension type (None accepted), then forwards to the
 * implementation (__pyx_pf_..._400cudaGraphAddHostNode, defined below). */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_401cudaGraphAddHostNode(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_graph = 0; PyObject *__pyx_v_pDependencies = 0; size_t __pyx_v_numDependencies; struct __pyx_obj_4cuda_8bindings_7runtime_cudaHostNodeParams *__pyx_v_pNodeParams = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphAddHostNode (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_graph_2,&__pyx_mstate_global->__pyx_n_u_pDependencies,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_pNodeParams,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26301, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26301, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26301, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26301, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26301, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphAddHostNode", 0) < (0)) __PYX_ERR(0, 26301, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphAddHostNode", 1, 4, 4, i); __PYX_ERR(0, 26301, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26301, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26301, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26301, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26301, __pyx_L3_error) } __pyx_v_graph = values[0]; __pyx_v_pDependencies = values[1]; __pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 26302, __pyx_L3_error) __pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaHostNodeParams *)values[3]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphAddHostNode", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 26301, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddHostNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pNodeParams), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaHostNodeParams, 1, "pNodeParams", 0))) __PYX_ERR(0, 26302, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_400cudaGraphAddHostNode(__pyx_self, __pyx_v_graph, __pyx_v_pDependencies, __pyx_v_numDependencies, __pyx_v_pNodeParams); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaGraphAddHostNode_2generator98(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
/* "cuda/bindings/runtime.pyx":26338 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */
/* Genexpr factory: allocates the closure scope holding the iterable
 * (pDependencies) and returns a new generator object whose body is the
 * isinstance check implemented by ..._2generator98 below. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_20cudaGraphAddHostNode_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_98_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_98_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_98_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_98_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_98_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 26338, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_20cudaGraphAddHostNode_2generator98, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[98]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaGraphAddHostNode_locals_gene, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 26338, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddHostNode.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Generator body for the inlined all(...) genexpr: iterates the closure's
 * iterable and returns Py_False on the first element that is neither a
 * cudaGraphNode_t nor a driver.CUgraphNode, Py_True otherwise. */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaGraphAddHostNode_2generator98(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_98_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_98_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 26338, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 26338, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26338, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26338, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1);
#if !CYTHON_ASSUME_SAFE_SIZE
if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26338, __pyx_L1_error)
#endif
if (__pyx_t_2 >= __pyx_temp) break; }
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2));
#else
__pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2);
#endif
++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26338, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 26338, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L7_bool_binop_done; } __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_5 = __pyx_t_6; __pyx_L7_bool_binop_done:; __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r);
#if !CYTHON_USE_EXC_INFO_STACK
__Pyx_Coroutine_ResetAndClearException(__pyx_generator);
#endif
__pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":26301 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddHostNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pNodeParams : Optional[cudaHostNodeParams]): * """ Creates a host execution node and adds it to a graph. 
*/ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_400cudaGraphAddHostNode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_graph, PyObject *__pyx_v_pDependencies, size_t __pyx_v_numDependencies, struct __pyx_obj_4cuda_8bindings_7runtime_cudaHostNodeParams *__pyx_v_pNodeParams) { cudaGraph_t __pyx_v_cygraph; PyObject *__pyx_v_pgraph = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *__pyx_v_pGraphNode = 0; cudaGraphNode_t *__pyx_v_cypDependencies; Py_ssize_t __pyx_v_idx; struct cudaHostNodeParams *__pyx_v_cypNodeParams_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_20cudaGraphAddHostNode_2generator98 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; struct cudaHostNodeParams *__pyx_t_14; cudaError_t __pyx_t_15; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphAddHostNode", 0); __Pyx_INCREF(__pyx_v_pDependencies); /* "cuda/bindings/runtime.pyx":26337 * :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaLaunchHostFunc`, :py:obj:`~.cudaGraphHostNodeGetParams`, :py:obj:`~.cudaGraphHostNodeSetParams`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` * """ * pDependencies = [] if pDependencies is None else pDependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected 
tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_2 = (__pyx_v_pDependencies == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26337, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_pDependencies); __pyx_t_1 = __pyx_v_pDependencies; } __Pyx_DECREF_SET(__pyx_v_pDependencies, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26338 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_20cudaGraphAddHostNode_genexpr(NULL, __pyx_v_pDependencies); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26338, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 26338, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":26339 * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraph_t cygraph * if graph is None: */ __pyx_t_1 = NULL; 
__Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_pDependencies_is_not_in}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26339, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 26339, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26338 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ } /* "cuda/bindings/runtime.pyx":26341 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_4 = (__pyx_v_graph == Py_None); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26342 * cdef cyruntime.cudaGraph_t cygraph * if graph is None: * pgraph = 0 # <<<<<<<<<<<<<< * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pgraph = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26341 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or 
list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":26343 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_4 = __pyx_t_2; __pyx_L5_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26344 * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) # <<<<<<<<<<<<<< * else: * pgraph = int(cudaGraph_t(graph)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_graph); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26344, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pgraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26343 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":26346 * pgraph = int(graph) * else: * pgraph = int(cudaGraph_t(graph)) # <<<<<<<<<<<<<< * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_graph}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); 
__pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26346, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_1 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26346, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pgraph = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":26347 * else: * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph # <<<<<<<<<<<<<< * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pgraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26347, __pyx_L1_error) __pyx_v_cygraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26348 * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26348, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pGraphNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26349 * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * 
cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL # <<<<<<<<<<<<<< * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cypDependencies = NULL; /* "cuda/bindings/runtime.pyx":26350 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26350, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26351 * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26351, __pyx_L1_error) __pyx_v_cypDependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":26352 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_4 = (__pyx_v_cypDependencies == NULL); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":26353 * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + 
str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(pDependencies)): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26353, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); 
__pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26353, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 26353, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26352 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":26355 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(pDependencies)): # <<<<<<<<<<<<<< * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26355, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":26356 * else: * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26356, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (__pyx_v_cypDependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr[0])); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } /* "cuda/bindings/runtime.pyx":26350 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * 
cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":26357 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26357, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 == 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26358 * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26358, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cypDependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26357 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ } __pyx_L7:; /* "cuda/bindings/runtime.pyx":26359 * elif len(pDependencies) == 1: * cypDependencies = 
(pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) # <<<<<<<<<<<<<< * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26359, __pyx_L1_error) __pyx_t_4 = (__pyx_v_numDependencies > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_4)) { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_9 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26359, __pyx_L1_error) __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26359, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26359, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 26359, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":26360 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphAddHostNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) */ __pyx_t_4 = (((PyObject *)__pyx_v_pNodeParams) != Py_None); if (__pyx_t_4) { __pyx_t_14 = __pyx_v_pNodeParams->_pvt_ptr; } else { __pyx_t_14 = NULL; } __pyx_v_cypNodeParams_ptr = __pyx_t_14; /* "cuda/bindings/runtime.pyx":26361 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddHostNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26362 * cdef 
cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: * err = cyruntime.cudaGraphAddHostNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) # <<<<<<<<<<<<<< * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) */ __pyx_t_15 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphAddHostNode(((cudaGraphNode_t *)__pyx_v_pGraphNode->__pyx_base._pvt_ptr), __pyx_v_cygraph, __pyx_v_cypDependencies, __pyx_v_numDependencies, __pyx_v_cypNodeParams_ptr); if (unlikely(__pyx_t_15 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26362, __pyx_L13_error) __pyx_v_err = __pyx_t_15; } /* "cuda/bindings/runtime.pyx":26361 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddHostNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L14; } __pyx_L13_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L14:; } } /* "cuda/bindings/runtime.pyx":26363 * with nogil: * err = cyruntime.cudaGraphAddHostNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26363, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto 
__pyx_L16_bool_binop_done; } __pyx_t_2 = (__pyx_v_cypDependencies != NULL); __pyx_t_4 = __pyx_t_2; __pyx_L16_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26364 * err = cyruntime.cudaGraphAddHostNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cypDependencies); /* "cuda/bindings/runtime.pyx":26363 * with nogil: * err = cyruntime.cudaGraphAddHostNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cypNodeParams_ptr) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":26365 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26366 * free(cypDependencies) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraphNode) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26366, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26366, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26366, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26366, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26366, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None) != (0)) __PYX_ERR(0, 26366, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26365 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ } /* "cuda/bindings/runtime.pyx":26367 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26367, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26367, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26367, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26367, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 26367, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraphNode); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraphNode); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_pGraphNode)) != (0)) __PYX_ERR(0, 26367, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26301 * return 
(_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddHostNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, pNodeParams : Optional[cudaHostNodeParams]): * """ Creates a host execution node and adds it to a graph. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddHostNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pgraph); __Pyx_XDECREF((PyObject *)__pyx_v_pGraphNode); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_20cudaGraphAddHostNode_2generator98); __Pyx_XDECREF(__pyx_v_pDependencies); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26369 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphHostNodeGetParams(node): * """ Returns a host node's parameters. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_403cudaGraphHostNodeGetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_402cudaGraphHostNodeGetParams, "cudaGraphHostNodeGetParams(node)\n\nReturns a host node's parameters.\n\nReturns the parameters of host node `node` in `pNodeParams`.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to get the parameters for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npNodeParams : :py:obj:`~.cudaHostNodeParams`\n Pointer to return the parameters\n\nSee Also\n--------\n:py:obj:`~.cudaLaunchHostFunc`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphHostNodeSetParams`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_403cudaGraphHostNodeGetParams = {"cudaGraphHostNodeGetParams", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_403cudaGraphHostNodeGetParams, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_402cudaGraphHostNodeGetParams}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_403cudaGraphHostNodeGetParams(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_node = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphHostNodeGetParams (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = 
PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26369, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26369, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphHostNodeGetParams", 0) < (0)) __PYX_ERR(0, 26369, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphHostNodeGetParams", 1, 1, 1, i); __PYX_ERR(0, 26369, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26369, __pyx_L3_error) } __pyx_v_node = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphHostNodeGetParams", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 26369, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphHostNodeGetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_pf_4cuda_8bindings_7runtime_402cudaGraphHostNodeGetParams(__pyx_self, __pyx_v_node); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_402cudaGraphHostNodeGetParams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node) { cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pnode = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaHostNodeParams *__pyx_v_pNodeParams = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphHostNodeGetParams", 0); /* "cuda/bindings/runtime.pyx":26392 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_node == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26393 * cdef cyruntime.cudaGraphNode_t cynode * if node is None: * pnode = 0 # <<<<<<<<<<<<<< * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pnode = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26392 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26394 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ __pyx_t_2 = 
__Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26395 * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) # <<<<<<<<<<<<<< * else: * pnode = int(cudaGraphNode_t(node)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26395, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pnode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26394 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26397 * pnode = int(node) * else: * pnode = int(cudaGraphNode_t(node)) # <<<<<<<<<<<<<< * cynode = pnode * cdef cudaHostNodeParams pNodeParams = cudaHostNodeParams() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26397, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pnode = 
((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":26398 * else: * pnode = int(cudaGraphNode_t(node)) * cynode = pnode # <<<<<<<<<<<<<< * cdef cudaHostNodeParams pNodeParams = cudaHostNodeParams() * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26398, __pyx_L1_error) __pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26399 * pnode = int(cudaGraphNode_t(node)) * cynode = pnode * cdef cudaHostNodeParams pNodeParams = cudaHostNodeParams() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphHostNodeGetParams(cynode, pNodeParams._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaHostNodeParams); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaHostNodeParams); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26399, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaHostNodeParams *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26400 * cynode = pnode * cdef cudaHostNodeParams pNodeParams = cudaHostNodeParams() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphHostNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26401 * cdef cudaHostNodeParams pNodeParams = cudaHostNodeParams() * with nogil: * err = 
cyruntime.cudaGraphHostNodeGetParams(cynode, pNodeParams._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphHostNodeGetParams(__pyx_v_cynode, ((struct cudaHostNodeParams *)__pyx_v_pNodeParams->_pvt_ptr)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26401, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":26400 * cynode = pnode * cdef cudaHostNodeParams pNodeParams = cudaHostNodeParams() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphHostNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":26402 * with nogil: * err = cyruntime.cudaGraphHostNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26403 * err = cyruntime.cudaGraphHostNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pNodeParams) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26403, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 26403, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 26403, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26402 * with nogil: * err = cyruntime.cudaGraphHostNodeGetParams(cynode, pNodeParams._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) */ } /* "cuda/bindings/runtime.pyx":26404 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pNodeParams) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26404, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26404, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pNodeParams); __Pyx_GIVEREF((PyObject *)__pyx_v_pNodeParams); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject 
*)__pyx_v_pNodeParams)) != (0)) __PYX_ERR(0, 26404, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26369 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphHostNodeGetParams(node): * """ Returns a host node's parameters. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphHostNodeGetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XDECREF((PyObject *)__pyx_v_pNodeParams); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26406 * return (_dict_cudaError_t[err], pNodeParams) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphHostNodeSetParams(node, pNodeParams : Optional[cudaHostNodeParams]): * """ Sets a host node's parameters. 
*/ /* Python wrapper */
/* NOTE(review): this file is machine-generated by Cython from cuda/bindings/runtime.pyx
 * (the banner comments below quote the .pyx source around line 26406). Do not hand-edit;
 * change the .pyx and regenerate. Comments added here are navigation aids only. */
/* Forward declaration of the CPython-level wrapper for runtime.cudaGraphHostNodeSetParams. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_405cudaGraphHostNodeSetParams(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Python-visible docstring; the leading signature line comes from @cython.embedsignature(True). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_404cudaGraphHostNodeSetParams, "cudaGraphHostNodeSetParams(node, cudaHostNodeParams pNodeParams: Optional[cudaHostNodeParams])\n\nSets a host node's parameters.\n\nSets the parameters of host node `node` to `nodeParams`.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to set the parameters for\npNodeParams : :py:obj:`~.cudaHostNodeParams`\n Parameters to copy\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaLaunchHostFunc`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphHostNodeGetParams`");
/* Method-table entry binding the wrapper under the public name "cudaGraphHostNodeSetParams". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_405cudaGraphHostNodeSetParams = {"cudaGraphHostNodeSetParams", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_405cudaGraphHostNodeSetParams, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_404cudaGraphHostNodeSetParams};
/* Wrapper: unpacks exactly two arguments (node, pNodeParams) from positional/keyword input,
 * type-checks pNodeParams against the cudaHostNodeParams extension type (None accepted -- the
 * '1' passed to __Pyx_ArgTypeTest allows None), then dispatches to the __pyx_pf_ implementation
 * below. Returns NULL with an exception set on any argument error. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_405cudaGraphHostNodeSetParams(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_node = 0; struct __pyx_obj_4cuda_8bindings_7runtime_cudaHostNodeParams *__pyx_v_pNodeParams = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphHostNodeSetParams (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* Keyword-name table: ("node", "pNodeParams"). */
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,&__pyx_mstate_global->__pyx_n_u_pNodeParams,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26406, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26406, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26406, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphHostNodeSetParams", 0) < (0)) __PYX_ERR(0, 26406, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphHostNodeSetParams", 1, 2, 2, i); __PYX_ERR(0, 26406, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26406, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26406, __pyx_L3_error) } __pyx_v_node = values[0]; __pyx_v_pNodeParams = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaHostNodeParams *)values[1]); } goto __pyx_L6_skip; __pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaGraphHostNodeSetParams", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 26406, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:;
/* Argument-parsing error path: drop any refs taken into values[] before raising. */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphHostNodeSetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_pNodeParams), __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaHostNodeParams, 1, "pNodeParams", 0))) __PYX_ERR(0, 26407, __pyx_L1_error) __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_404cudaGraphHostNodeSetParams(__pyx_self, __pyx_v_node, __pyx_v_pNodeParams); /* function exit code */ goto __pyx_L0; __pyx_L1_error:;
__pyx_r = NULL; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } goto __pyx_L7_cleaned_up; __pyx_L0:;
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __pyx_L7_cleaned_up:; __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation of runtime.cudaGraphHostNodeSetParams(node, pNodeParams):
 * coerces `node` to an integer handle via int(...) (accepting None -> 0, cudaGraphNode_t,
 * driver.CUgraphNode, or anything cudaGraphNode_t(node) accepts), takes
 * pNodeParams._pvt_ptr (NULL when pNodeParams is None), calls the cyruntime binding with
 * the GIL released, and returns the 1-tuple (_dict_cudaError_t[err],). */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_404cudaGraphHostNodeSetParams(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node, struct __pyx_obj_4cuda_8bindings_7runtime_cudaHostNodeParams *__pyx_v_pNodeParams) { cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pnode = NULL; struct cudaHostNodeParams *__pyx_v_cypNodeParams_ptr; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; struct cudaHostNodeParams *__pyx_t_8; cudaError_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphHostNodeSetParams", 0);
/* "cuda/bindings/runtime.pyx":26429 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */
__pyx_t_1 = (__pyx_v_node == Py_None); if (__pyx_t_1) {
/* "cuda/bindings/runtime.pyx":26430 * cdef cyruntime.cudaGraphNode_t cynode * if node is None: * pnode = 0 # <<<<<<<<<<<<<< * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) */
__Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pnode = __pyx_mstate_global->__pyx_int_0;
/* "cuda/bindings/runtime.pyx":26429 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */
goto __pyx_L3; }
/* "cuda/bindings/runtime.pyx":26431 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */
__pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) {
/* "cuda/bindings/runtime.pyx":26432 * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) # <<<<<<<<<<<<<< * else: * pnode = int(cudaGraphNode_t(node)) */
__pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26432, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pnode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0;
/* "cuda/bindings/runtime.pyx":26431 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */
goto __pyx_L3; }
/* "cuda/bindings/runtime.pyx":26434 * pnode = int(node) * else: * pnode = int(cudaGraphNode_t(node)) # <<<<<<<<<<<<<< * cynode = pnode * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL */
/*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26434, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26434, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pnode = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:;
/* "cuda/bindings/runtime.pyx":26435 * else: * pnode = int(cudaGraphNode_t(node)) * cynode = pnode # <<<<<<<<<<<<<< * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: */
__pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26435, __pyx_L1_error) __pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7));
/* "cuda/bindings/runtime.pyx":26436 * pnode = int(cudaGraphNode_t(node)) * cynode = pnode * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphHostNodeSetParams(cynode, cypNodeParams_ptr) */
__pyx_t_1 = (((PyObject *)__pyx_v_pNodeParams) != Py_None); if (__pyx_t_1) { __pyx_t_8 = __pyx_v_pNodeParams->_pvt_ptr; } else { __pyx_t_8 = NULL; } __pyx_v_cypNodeParams_ptr = __pyx_t_8;
/* "cuda/bindings/runtime.pyx":26437 * cynode = pnode * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphHostNodeSetParams(cynode, cypNodeParams_ptr) * return (_dict_cudaError_t[err],) */
{ PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ {
/* "cuda/bindings/runtime.pyx":26438 * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: * err = cyruntime.cudaGraphHostNodeSetParams(cynode, cypNodeParams_ptr) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err],) * */
__pyx_t_9 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphHostNodeSetParams(__pyx_v_cynode, __pyx_v_cypNodeParams_ptr); if (unlikely(__pyx_t_9 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26438, __pyx_L7_error) __pyx_v_err = __pyx_t_9; }
/* "cuda/bindings/runtime.pyx":26437 * cynode = pnode * cdef cyruntime.cudaHostNodeParams* cypNodeParams_ptr = pNodeParams._pvt_ptr if pNodeParams is not None else NULL * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphHostNodeSetParams(cynode, cypNodeParams_ptr) * return (_dict_cudaError_t[err],) */
/*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } }
/* "cuda/bindings/runtime.pyx":26439 * with nogil: * err = cyruntime.cudaGraphHostNodeSetParams(cynode, cypNodeParams_ptr) * return (_dict_cudaError_t[err],) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */
__Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26439, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26439, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26439, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26439, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 26439, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0;
/* "cuda/bindings/runtime.pyx":26406 * return (_dict_cudaError_t[err], pNodeParams) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphHostNodeSetParams(node, pNodeParams : Optional[cudaHostNodeParams]): * """ Sets a host node's parameters. */
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphHostNodeSetParams", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* "cuda/bindings/runtime.pyx":26441 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddChildGraphNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, childGraph): * """ Creates a child graph node and adds it to a graph.
*/ /* Python wrapper */
/* NOTE(review): Cython-generated (source: cuda/bindings/runtime.pyx around line 26441).
 * Do not hand-edit; regenerate from the .pyx. Added comments are navigation aids only. */
/* Forward declaration of the CPython-level wrapper for runtime.cudaGraphAddChildGraphNode. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_407cudaGraphAddChildGraphNode(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
/* Python-visible docstring; the leading signature line comes from @cython.embedsignature(True). */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_406cudaGraphAddChildGraphNode, "cudaGraphAddChildGraphNode(graph, pDependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, childGraph)\n\nCreates a child graph node and adds it to a graph.\n\nCreates a new node which executes an embedded graph, and adds it to\n`graph` with `numDependencies` dependencies specified via\n`pDependencies`. It is possible for `numDependencies` to be 0, in which\ncase the node will be placed at the root of the graph. `pDependencies`\nmay not have any duplicate entries. A handle to the new node will be\nreturned in `pGraphNode`.\n\nIf `childGraph` contains allocation nodes, free nodes, or conditional\nnodes, this call will return an error.\n\nThe node executes an embedded child graph. The child graph is cloned in\nthis call.\n\nParameters\n----------\ngraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n Graph to which to add the node\npDependencies : list[:py:obj:`~.cudaGraphNode_t`]\n Dependencies of the node\nnumDependencies : size_t\n Number of dependencies\nchildGraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n The graph to clone into this node\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npGraphNode : :py:obj:`~.cudaGraphNode_t`\n Returns newly created node\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphChildGraphNodeGetGraph`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode`, :py:obj:`~.cudaGraphClone`");
/* Method-table entry binding the wrapper under the public name "cudaGraphAddChildGraphNode". */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_407cudaGraphAddChildGraphNode = {"cudaGraphAddChildGraphNode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_407cudaGraphAddChildGraphNode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_406cudaGraphAddChildGraphNode};
/* Wrapper: unpacks exactly four arguments (graph, pDependencies, numDependencies, childGraph)
 * from positional/keyword input, converts numDependencies to size_t, then dispatches to the
 * __pyx_pf_ implementation. Returns NULL with an exception set on any argument error. */
static PyObject *__pyx_pw_4cuda_8bindings_7runtime_407cudaGraphAddChildGraphNode(PyObject *__pyx_self,
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) { PyObject *__pyx_v_graph = 0; PyObject *__pyx_v_pDependencies = 0; size_t __pyx_v_numDependencies; PyObject *__pyx_v_childGraph = 0;
#if !CYTHON_METH_FASTCALL
CYTHON_UNUSED Py_ssize_t __pyx_nargs;
#endif
CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] = {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphAddChildGraphNode (wrapper)", 0);
#if !CYTHON_METH_FASTCALL
#if CYTHON_ASSUME_SAFE_SIZE
__pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
#else
__pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
#endif
#endif
__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
/* Keyword-name table: ("graph", "pDependencies", "numDependencies", "childGraph"). */
{ PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_graph_2,&__pyx_mstate_global->__pyx_n_u_pDependencies,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_childGraph,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26441, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26441, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26441, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26441, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26441, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphAddChildGraphNode", 0) < (0)) __PYX_ERR(0, 26441, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphAddChildGraphNode", 1, 4, 4, i); __PYX_ERR(0, 26441, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26441, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26441, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26441, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26441, __pyx_L3_error) } __pyx_v_graph = values[0]; __pyx_v_pDependencies = values[1]; __pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 26442, __pyx_L3_error) __pyx_v_childGraph = values[3]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("cudaGraphAddChildGraphNode", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 26441, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:;
/* Argument-parsing error path: drop any refs taken into values[] before raising. */
for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddChildGraphNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_4cuda_8bindings_7runtime_406cudaGraphAddChildGraphNode(__pyx_self, __pyx_v_graph, __pyx_v_pDependencies, __pyx_v_numDependencies, __pyx_v_childGraph); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Forward declaration of the generator body implementing the genexpr in the
 * all(isinstance(...) for _x in pDependencies) type check (see the .pyx banner below). */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_26cudaGraphAddChildGraphNode_2generator99(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /*
"cuda/bindings/runtime.pyx":26489 * cychildGraph = pchildGraph * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_26cudaGraphAddChildGraphNode_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_99_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_99_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_99_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_99_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_99_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 26489, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_26cudaGraphAddChildGraphNode_2generator99, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[99]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaGraphAddChildGraphNode_local, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); 
if (unlikely(!gen)) __PYX_ERR(0, 26489, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddChildGraphNode.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_26cudaGraphAddChildGraphNode_2generator99(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_99_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_99_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 26489, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 26489, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); 
__pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26489, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26489, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26489, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26489, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 26489, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L7_bool_binop_done; } __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_5 = __pyx_t_6; __pyx_L7_bool_binop_done:; __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto 
__pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26441 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddChildGraphNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, childGraph): * """ Creates a child graph node and adds it to a graph. 
*/
/* NOTE(review): Cython-generated implementation of
 * cuda.bindings.runtime.cudaGraphAddChildGraphNode (runtime.pyx:26441-26517).
 * Do not hand-edit; change the .pyx and regenerate.
 * Behavior visible here:
 *  - `childGraph` / `graph` are normalized to integer handles (None -> 0,
 *    cudaGraph_t/CUgraph -> int(obj), otherwise int(cudaGraph_t(obj))) and
 *    cast through void_ptr to cudaGraph_t.
 *  - `pDependencies` defaults to [] when None and each element is checked to
 *    be cudaGraphNode_t/CUgraphNode via an inlined genexpr, else TypeError.
 *  - Marshalling: when len(pDependencies) > 1 a C array is calloc'd and each
 *    element's _pvt_ptr[0] copied in; when len == 1, cypDependencies ALIASES
 *    the single element's internal _pvt_ptr (no allocation) -- which is why
 *    the later free() is guarded by `len > 1`.
 *  - RuntimeError if numDependencies exceeds len(pDependencies); the actual
 *    cyruntime call runs with the GIL released (Py_UNBLOCK_THREADS block).
 *  - Returns (_dict_cudaError_t[err], None) on failure, else
 *    (_dict_cudaError_t[err], pGraphNode).
 * NOTE(review): on __pyx_L1_error paths reached after a successful calloc
 * (e.g. a failing item lookup inside the copy loop, or the nogil call's
 * error path), cypDependencies is not freed -- looks like a small leak on
 * error; confirm/fix upstream in the code generator, not here.
 */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_406cudaGraphAddChildGraphNode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_graph, PyObject *__pyx_v_pDependencies, size_t __pyx_v_numDependencies, PyObject *__pyx_v_childGraph) { cudaGraph_t __pyx_v_cychildGraph; PyObject *__pyx_v_pchildGraph = NULL; cudaGraph_t __pyx_v_cygraph; PyObject *__pyx_v_pgraph = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *__pyx_v_pGraphNode = 0; cudaGraphNode_t *__pyx_v_cypDependencies; Py_ssize_t __pyx_v_idx; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_26cudaGraphAddChildGraphNode_2generator99 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; cudaError_t __pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphAddChildGraphNode", 0); __Pyx_INCREF(__pyx_v_pDependencies); /* "cuda/bindings/runtime.pyx":26481 * """ * cdef cyruntime.cudaGraph_t cychildGraph * if childGraph is None: # <<<<<<<<<<<<<< * pchildGraph = 0 * elif isinstance(childGraph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_1 = (__pyx_v_childGraph == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26482 * cdef cyruntime.cudaGraph_t cychildGraph * if childGraph is None: * pchildGraph = 0 # <<<<<<<<<<<<<< * elif isinstance(childGraph, (cudaGraph_t,driver.CUgraph)): * pchildGraph = int(childGraph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pchildGraph = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26481 * """ * cdef cyruntime.cudaGraph_t cychildGraph * if childGraph is None: # <<<<<<<<<<<<<< * pchildGraph = 0 * elif 
isinstance(childGraph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26483 * if childGraph is None: * pchildGraph = 0 * elif isinstance(childGraph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pchildGraph = int(childGraph) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_childGraph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_childGraph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26484 * pchildGraph = 0 * elif isinstance(childGraph, (cudaGraph_t,driver.CUgraph)): * pchildGraph = int(childGraph) # <<<<<<<<<<<<<< * else: * pchildGraph = int(cudaGraph_t(childGraph)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_childGraph); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26484, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pchildGraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26483 * if childGraph is None: * pchildGraph = 0 * elif isinstance(childGraph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pchildGraph = int(childGraph) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26486 * pchildGraph = int(childGraph) * else: * pchildGraph = int(cudaGraph_t(childGraph)) # <<<<<<<<<<<<<< * cychildGraph = pchildGraph * pDependencies = [] if pDependencies is None else pDependencies */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_childGraph}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); 
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26486, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26486, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pchildGraph = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":26487 * else: * pchildGraph = int(cudaGraph_t(childGraph)) * cychildGraph = pchildGraph # <<<<<<<<<<<<<< * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pchildGraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26487, __pyx_L1_error) __pyx_v_cychildGraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26488 * pchildGraph = int(cudaGraph_t(childGraph)) * cychildGraph = pchildGraph * pDependencies = [] if pDependencies is None else pDependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_1 = (__pyx_v_pDependencies == Py_None); if (__pyx_t_1) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_pDependencies); __pyx_t_5 = __pyx_v_pDependencies; } __Pyx_DECREF_SET(__pyx_v_pDependencies, __pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26489 * cychildGraph = pchildGraph * pDependencies = [] if pDependencies is None else pDependencies * 
if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ __pyx_t_5 = __pyx_pf_4cuda_8bindings_7runtime_26cudaGraphAddChildGraphNode_genexpr(NULL, __pyx_v_pDependencies); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26489, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 26489, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = (!__pyx_t_1); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":26490 * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraph_t cygraph * if graph is None: */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_4 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_Argument_pDependencies_is_not_in}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26490, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 
26490, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26489 * cychildGraph = pchildGraph * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ } /* "cuda/bindings/runtime.pyx":26492 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_2 = (__pyx_v_graph == Py_None); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26493 * cdef cyruntime.cudaGraph_t cygraph * if graph is None: * pgraph = 0 # <<<<<<<<<<<<<< * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pgraph = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26492 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":26494 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L8_bool_binop_done; } __pyx_t_1 = 
__Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_2 = __pyx_t_1; __pyx_L8_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26495 * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) # <<<<<<<<<<<<<< * else: * pgraph = int(cudaGraph_t(graph)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_graph); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26495, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pgraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26494 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":26497 * pgraph = int(graph) * else: * pgraph = int(cudaGraph_t(graph)) # <<<<<<<<<<<<<< * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_graph}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26497, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26497, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pgraph = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L7:; /* "cuda/bindings/runtime.pyx":26498 * else: * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph # <<<<<<<<<<<<<< * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef 
cyruntime.cudaGraphNode_t* cypDependencies = NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pgraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26498, __pyx_L1_error) __pyx_v_cygraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26499 * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26499, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_pGraphNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26500 * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL # <<<<<<<<<<<<<< * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cypDependencies = NULL; /* "cuda/bindings/runtime.pyx":26501 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if 
(unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26501, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26502 * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26502, __pyx_L1_error) __pyx_v_cypDependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":26503 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_2 = (__pyx_v_cypDependencies == NULL); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":26504 * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(pDependencies)): */ __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26504, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26504, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_9}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 26504, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26503 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* 
"cuda/bindings/runtime.pyx":26506 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(pDependencies)): # <<<<<<<<<<<<<< * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26506, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":26507 * else: * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr */ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_pDependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26507, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); (__pyx_v_cypDependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_5)->__pyx_base._pvt_ptr[0])); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } /* "cuda/bindings/runtime.pyx":26501 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ goto __pyx_L10; } /* "cuda/bindings/runtime.pyx":26508 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if 
(unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26508, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 == 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26509 * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: */ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_pDependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26509, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_cypDependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_5)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26508 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ } __pyx_L10:; /* "cuda/bindings/runtime.pyx":26510 * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphAddChildGraphNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cychildGraph) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26510, __pyx_L1_error) __pyx_t_2 = (__pyx_v_numDependencies > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_9 = __pyx_builtin_RuntimeError; __pyx_t_8 = 
PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26510, __pyx_L1_error) __pyx_t_4 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_4, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 26510, __pyx_L1_error) } /* 
"cuda/bindings/runtime.pyx":26511 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddChildGraphNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cychildGraph) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26512 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: * err = cyruntime.cudaGraphAddChildGraphNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cychildGraph) # <<<<<<<<<<<<<< * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) */ __pyx_t_14 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphAddChildGraphNode(((cudaGraphNode_t *)__pyx_v_pGraphNode->__pyx_base._pvt_ptr), __pyx_v_cygraph, __pyx_v_cypDependencies, __pyx_v_numDependencies, __pyx_v_cychildGraph); if (unlikely(__pyx_t_14 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26512, __pyx_L16_error) __pyx_v_err = __pyx_t_14; } /* "cuda/bindings/runtime.pyx":26511 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddChildGraphNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cychildGraph) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L17; } __pyx_L16_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L17:; } } /* 
"cuda/bindings/runtime.pyx":26513 * with nogil: * err = cyruntime.cudaGraphAddChildGraphNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cychildGraph) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26513, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 > 1); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L19_bool_binop_done; } __pyx_t_1 = (__pyx_v_cypDependencies != NULL); __pyx_t_2 = __pyx_t_1; __pyx_L19_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26514 * err = cyruntime.cudaGraphAddChildGraphNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cychildGraph) * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cypDependencies); /* "cuda/bindings/runtime.pyx":26513 * with nogil: * err = cyruntime.cudaGraphAddChildGraphNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cychildGraph) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":26515 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26516 * free(cypDependencies) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraphNode) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(0, 26516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_9); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 26516, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None) != (0)) __PYX_ERR(0, 26516, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26515 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ } /* "cuda/bindings/runtime.pyx":26517 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_9, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 26517, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26517, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraphNode); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraphNode); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_pGraphNode)) != (0)) __PYX_ERR(0, 26517, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26441 * return (_dict_cudaError_t[err],) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddChildGraphNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, childGraph): * """ Creates a child graph node and adds it to a graph. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddChildGraphNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pchildGraph); __Pyx_XDECREF(__pyx_v_pgraph); __Pyx_XDECREF((PyObject *)__pyx_v_pGraphNode); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_26cudaGraphAddChildGraphNode_2generator99); __Pyx_XDECREF(__pyx_v_pDependencies); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26519 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphChildGraphNodeGetGraph(node): * """ Gets a handle to the embedded graph of a child graph node. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_409cudaGraphChildGraphNodeGetGraph(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/
/* NOTE(review): Cython-GENERATED code for cuda.bindings.runtime.cudaGraphChildGraphNodeGetGraph.
 * Do not edit by hand - regenerate from cuda/bindings/runtime.pyx (source line 26519).
 * The wrapper below parses exactly one argument ('node', positional or keyword) and
 * dispatches to the __pyx_pf_..._408... implementation further down. */
PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_408cudaGraphChildGraphNodeGetGraph, "cudaGraphChildGraphNodeGetGraph(node)\n\nGets a handle to the embedded graph of a child graph node.\n\nGets a handle to the embedded graph in a child graph node. This call\ndoes not clone the graph. Changes to the graph will be reflected in the\nnode, and the node retains ownership of the graph.\n\nAllocation and free nodes cannot be added to the returned graph.\nAttempting to do so will return an error.\n\nParameters\n----------\nnode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n    Node to get the embedded graph for\n\nReturns\n-------\ncudaError_t\n    :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npGraph : :py:obj:`~.cudaGraph_t`\n    Location to store a handle to the graph\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphNodeFindInClone`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_409cudaGraphChildGraphNodeGetGraph = {"cudaGraphChildGraphNodeGetGraph", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_409cudaGraphChildGraphNodeGetGraph, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_408cudaGraphChildGraphNodeGetGraph}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_409cudaGraphChildGraphNodeGetGraph(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_node = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = 
{0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphChildGraphNodeGetGraph (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26519, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26519, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphChildGraphNodeGetGraph", 0) < (0)) __PYX_ERR(0, 26519, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphChildGraphNodeGetGraph", 1, 1, 1, i); __PYX_ERR(0, 26519, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26519, __pyx_L3_error) } __pyx_v_node = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphChildGraphNodeGetGraph", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 26519, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < 
(Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphChildGraphNodeGetGraph", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_408cudaGraphChildGraphNodeGetGraph(__pyx_self, __pyx_v_node); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Implementation body (generated). Normalizes 'node' into a raw handle:
 *   None -> 0; cudaGraphNode_t / driver.CUgraphNode -> int(node);
 *   anything else -> int(cudaGraphNode_t(node)).
 * Then calls cyruntime.cudaGraphChildGraphNodeGetGraph with the GIL released
 * and returns a 2-tuple (cudaError_t enum value, cudaGraph_t wrapper or None).
 * Error paths funnel through __pyx_L1_error, which decrefs all temporaries. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_408cudaGraphChildGraphNodeGetGraph(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node) { cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pnode = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *__pyx_v_pGraph = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphChildGraphNodeGetGraph", 0); /* "cuda/bindings/runtime.pyx":26547 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_node == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26548 * cdef cyruntime.cudaGraphNode_t cynode * if node is None: * pnode = 0 # <<<<<<<<<<<<<< * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pnode = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26547 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is 
None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26549 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26550 * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) # <<<<<<<<<<<<<< * else: * pnode = int(cudaGraphNode_t(node)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pnode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26549 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26552 * pnode = int(node) * else: * pnode = int(cudaGraphNode_t(node)) # <<<<<<<<<<<<<< * cynode = pnode * cdef cudaGraph_t pGraph = cudaGraph_t() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) 
__PYX_ERR(0, 26552, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pnode = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":26553 * else: * pnode = int(cudaGraphNode_t(node)) * cynode = pnode # <<<<<<<<<<<<<< * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26553, __pyx_L1_error) __pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26554 * pnode = int(cudaGraphNode_t(node)) * cynode = pnode * cdef cudaGraph_t pGraph = cudaGraph_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphChildGraphNodeGetGraph(cynode, pGraph._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26554, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_pGraph = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraph_t *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26555 * cynode = pnode * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphChildGraphNodeGetGraph(cynode, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = 
NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26556 * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: * err = cyruntime.cudaGraphChildGraphNodeGetGraph(cynode, pGraph._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphChildGraphNodeGetGraph(__pyx_v_cynode, ((cudaGraph_t *)__pyx_v_pGraph->__pyx_base._pvt_ptr)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26556, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":26555 * cynode = pnode * cdef cudaGraph_t pGraph = cudaGraph_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphChildGraphNodeGetGraph(cynode, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":26557 * with nogil: * err = cyruntime.cudaGraphChildGraphNodeGetGraph(cynode, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraph) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26558 * err = cyruntime.cudaGraphChildGraphNodeGetGraph(cynode, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraph) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 
= __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 26558, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 26558, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26557 * with nogil: * err = cyruntime.cudaGraphChildGraphNodeGetGraph(cynode, pGraph._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraph) */ } /* "cuda/bindings/runtime.pyx":26559 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraph) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26559, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26559, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraph); 
__Pyx_GIVEREF((PyObject *)__pyx_v_pGraph); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_pGraph)) != (0)) __PYX_ERR(0, 26559, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26519 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphChildGraphNodeGetGraph(node): * """ Gets a handle to the embedded graph of a child graph node. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphChildGraphNodeGetGraph", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XDECREF((PyObject *)__pyx_v_pGraph); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26561 * return (_dict_cudaError_t[err], pGraph) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddEmptyNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies): * """ Creates an empty node and adds it to a graph. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_411cudaGraphAddEmptyNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_410cudaGraphAddEmptyNode, "cudaGraphAddEmptyNode(graph, pDependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies)\n\nCreates an empty node and adds it to a graph.\n\nCreates a new node which performs no operation, and adds it to `graph`\nwith `numDependencies` dependencies specified via `pDependencies`. It\nis possible for `numDependencies` to be 0, in which case the node will\nbe placed at the root of the graph. 
`pDependencies` may not have any\nduplicate entries. A handle to the new node will be returned in\n`pGraphNode`.\n\nAn empty node performs no operation during execution, but can be used\nfor transitive ordering. For example, a phased execution graph with 2\ngroups of n nodes with a barrier between them can be represented using\nan empty node and 2*n dependency edges, rather than no empty node and\nn^2 dependency edges.\n\nParameters\n----------\ngraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n Graph to which to add the node\npDependencies : list[:py:obj:`~.cudaGraphNode_t`]\n Dependencies of the node\nnumDependencies : size_t\n Number of dependencies\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\npGraphNode : :py:obj:`~.cudaGraphNode_t`\n Returns newly created node\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode`");
/* NOTE(review): Cython-GENERATED method table entry and argument-parsing wrapper for
 * cuda.bindings.runtime.cudaGraphAddEmptyNode(graph, pDependencies, numDependencies).
 * Do not edit by hand - regenerate from cuda/bindings/runtime.pyx (source line 26561).
 * The wrapper parses exactly three arguments (positional or keyword), converts
 * numDependencies to size_t, and dispatches to __pyx_pf_..._410... */
static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_411cudaGraphAddEmptyNode = {"cudaGraphAddEmptyNode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_411cudaGraphAddEmptyNode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_410cudaGraphAddEmptyNode}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_411cudaGraphAddEmptyNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_graph = 0; PyObject *__pyx_v_pDependencies = 0; size_t __pyx_v_numDependencies; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[3] = {0,0,0}; int __pyx_lineno = 0; const 
char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphAddEmptyNode (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_graph_2,&__pyx_mstate_global->__pyx_n_u_pDependencies,&__pyx_mstate_global->__pyx_n_u_numDependencies,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26561, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26561, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26561, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26561, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphAddEmptyNode", 0) < (0)) __PYX_ERR(0, 26561, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 3; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphAddEmptyNode", 1, 3, 3, i); __PYX_ERR(0, 26561, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 3)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 
26561, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26561, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26561, __pyx_L3_error) } __pyx_v_graph = values[0]; __pyx_v_pDependencies = values[1]; __pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 26562, __pyx_L3_error) } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphAddEmptyNode", 1, 3, 3, __pyx_nargs); __PYX_ERR(0, 26561, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddEmptyNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_410cudaGraphAddEmptyNode(__pyx_self, __pyx_v_graph, __pyx_v_pDependencies, __pyx_v_numDependencies); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_21cudaGraphAddEmptyNode_2generator100(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":26599 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected 
tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */
/* Generated genexpr factory: builds the closure scope holding the iterable and
 * returns the generator object implementing
 *   all(isinstance(_x, (cudaGraphNode_t, driver.CUgraphNode)) for _x in pDependencies)
 * used by cudaGraphAddEmptyNode's pDependencies type check. */
static PyObject *__pyx_pf_4cuda_8bindings_7runtime_21cudaGraphAddEmptyNode_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_100_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_100_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_100_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_100_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_100_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 26599, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_21cudaGraphAddEmptyNode_2generator100, ((PyObject *)__pyx_mstate_global->__pyx_codeobj_tab[100]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaGraphAddEmptyNode_locals_gen, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 26599, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddEmptyNode.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 
NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* Generated generator body for the genexpr above (inlined 'all'): iterates the
 * captured iterable (fast path for list/tuple, generic iterator otherwise),
 * returns Py_False on the first element failing the isinstance check,
 * Py_True if the iterable is exhausted. Runs once; resume after exhaustion raises. */
static PyObject *__pyx_gb_4cuda_8bindings_7runtime_21cudaGraphAddEmptyNode_2generator100(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_100_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_100_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 26599, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 26599, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? 
PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26599, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26599, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26599, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26599, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 26599, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L7_bool_binop_done; } __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_5 = __pyx_t_6; __pyx_L7_bool_binop_done:; __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26561 * return (_dict_cudaError_t[err], pGraph) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddEmptyNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies): * """ Creates an empty node and adds it to a graph. */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_410cudaGraphAddEmptyNode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_graph, PyObject *__pyx_v_pDependencies, size_t __pyx_v_numDependencies) { cudaGraph_t __pyx_v_cygraph; PyObject *__pyx_v_pgraph = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *__pyx_v_pGraphNode = 0; cudaGraphNode_t *__pyx_v_cypDependencies; Py_ssize_t __pyx_v_idx; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_21cudaGraphAddEmptyNode_2generator100 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; cudaError_t __pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphAddEmptyNode", 0); __Pyx_INCREF(__pyx_v_pDependencies); /* "cuda/bindings/runtime.pyx":26598 * :py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddHostNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode` * """ * pDependencies = [] if pDependencies is None else pDependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_2 = (__pyx_v_pDependencies == Py_None); if (__pyx_t_2) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_1 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_pDependencies); __pyx_t_1 = __pyx_v_pDependencies; } __Pyx_DECREF_SET(__pyx_v_pDependencies, __pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26599 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ __pyx_t_1 = __pyx_pf_4cuda_8bindings_7runtime_21cudaGraphAddEmptyNode_genexpr(NULL, __pyx_v_pDependencies); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_2 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 26599, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_4 = (!__pyx_t_2); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":26600 * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraph_t cygraph * if graph is None: */ __pyx_t_1 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_5 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_1, __pyx_mstate_global->__pyx_kp_u_Argument_pDependencies_is_not_in}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 26600, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26599 * """ * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ } /* "cuda/bindings/runtime.pyx":26602 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if 
graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_4 = (__pyx_v_graph == Py_None); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26603 * cdef cyruntime.cudaGraph_t cygraph * if graph is None: * pgraph = 0 # <<<<<<<<<<<<<< * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pgraph = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26602 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L4; } /* "cuda/bindings/runtime.pyx":26604 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_4 = __pyx_t_2; __pyx_L5_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26605 * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) # <<<<<<<<<<<<<< * else: * pgraph = int(cudaGraph_t(graph)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_graph); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26605, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pgraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26604 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ goto __pyx_L4; } /* 
"cuda/bindings/runtime.pyx":26607 * pgraph = int(graph) * else: * pgraph = int(cudaGraph_t(graph)) # <<<<<<<<<<<<<< * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() */ /*else*/ { __pyx_t_5 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_1 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_v_graph}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26607, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_1 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26607, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pgraph = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "cuda/bindings/runtime.pyx":26608 * else: * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph # <<<<<<<<<<<<<< * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pgraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26608, __pyx_L1_error) __pyx_v_cygraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26609 * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject 
*)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26609, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_1); } __pyx_v_pGraphNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26610 * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL # <<<<<<<<<<<<<< * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cypDependencies = NULL; /* "cuda/bindings/runtime.pyx":26611 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26611, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 > 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26612 * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26612, __pyx_L1_error) __pyx_v_cypDependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, 
(sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":26613 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_4 = (__pyx_v_cypDependencies == NULL); if (unlikely(__pyx_t_4)) { /* "cuda/bindings/runtime.pyx":26614 * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(pDependencies)): */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26614, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26614, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26614, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26614, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26614, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26614, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = 
__Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26614, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26614, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_9}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26614, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 26614, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26613 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":26616 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(pDependencies)): # <<<<<<<<<<<<<< * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26616, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":26617 * else: * for idx in range(len(pDependencies)): * cypDependencies[idx] = 
(pDependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); (__pyx_v_cypDependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr[0])); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } } /* "cuda/bindings/runtime.pyx":26611 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":26618 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26618, __pyx_L1_error) __pyx_t_4 = (__pyx_t_8 == 1); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26619 * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: */ __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_pDependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26619, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_cypDependencies = 
((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_1)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "cuda/bindings/runtime.pyx":26618 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ } __pyx_L7:; /* "cuda/bindings/runtime.pyx":26620 * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphAddEmptyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26620, __pyx_L1_error) __pyx_t_4 = (__pyx_v_numDependencies > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_4)) { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_9 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26620, __pyx_L1_error) __pyx_t_5 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_5, 
__pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_5); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_5}; __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26620, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); } __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(0, 26620, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":26621 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddEmptyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26622 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: * err = 
cyruntime.cudaGraphAddEmptyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies) # <<<<<<<<<<<<<< * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) */ __pyx_t_14 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphAddEmptyNode(((cudaGraphNode_t *)__pyx_v_pGraphNode->__pyx_base._pvt_ptr), __pyx_v_cygraph, __pyx_v_cypDependencies, __pyx_v_numDependencies); if (unlikely(__pyx_t_14 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26622, __pyx_L13_error) __pyx_v_err = __pyx_t_14; } /* "cuda/bindings/runtime.pyx":26621 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddEmptyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L14; } __pyx_L13_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L14:; } } /* "cuda/bindings/runtime.pyx":26623 * with nogil: * err = cyruntime.cudaGraphAddEmptyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26623, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { } else { __pyx_t_4 = __pyx_t_2; goto __pyx_L16_bool_binop_done; } __pyx_t_2 = (__pyx_v_cypDependencies != NULL); __pyx_t_4 = __pyx_t_2; __pyx_L16_bool_binop_done:; if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26624 * err = cyruntime.cudaGraphAddEmptyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies) * if 
len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cypDependencies); /* "cuda/bindings/runtime.pyx":26623 * with nogil: * err = cyruntime.cudaGraphAddEmptyNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":26625 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ __pyx_t_4 = (__pyx_v_err != cudaSuccess); if (__pyx_t_4) { /* "cuda/bindings/runtime.pyx":26626 * free(cypDependencies) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraphNode) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26626, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26626, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_t_9); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26626, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26626, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26626, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None) != (0)) __PYX_ERR(0, 26626, __pyx_L1_error); __pyx_t_5 = 0; 
__pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26625 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ } /* "cuda/bindings/runtime.pyx":26627 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26627, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_5 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26627, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_t_9, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26627, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26627, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1) != (0)) __PYX_ERR(0, 26627, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraphNode); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraphNode); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_5, 1, ((PyObject *)__pyx_v_pGraphNode)) != (0)) __PYX_ERR(0, 26627, __pyx_L1_error); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26561 * return (_dict_cudaError_t[err], pGraph) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddEmptyNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies): * """ Creates an empty node and adds it to a graph. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddEmptyNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pgraph); __Pyx_XDECREF((PyObject *)__pyx_v_pGraphNode); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_21cudaGraphAddEmptyNode_2generator100); __Pyx_XDECREF(__pyx_v_pDependencies); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26629 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddEventRecordNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, event): * """ Creates an event record node and adds it to a graph. */ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_413cudaGraphAddEventRecordNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_412cudaGraphAddEventRecordNode, "cudaGraphAddEventRecordNode(graph, pDependencies: Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, event)\n\nCreates an event record node and adds it to a graph.\n\nCreates a new event record node and adds it to `hGraph` with\n`numDependencies` dependencies specified via `dependencies` and event\nspecified in `event`. It is possible for `numDependencies` to be 0, in\nwhich case the node will be placed at the root of the graph.\n`dependencies` may not have any duplicate entries. 
A handle to the new\nnode will be returned in `phGraphNode`.\n\nEach launch of the graph will record `event` to capture execution of\nthe node's dependencies.\n\nThese nodes may not be used in loops or conditionals.\n\nParameters\n----------\nhGraph : :py:obj:`~.CUgraph` or :py:obj:`~.cudaGraph_t`\n Graph to which to add the node\ndependencies : list[:py:obj:`~.cudaGraphNode_t`]\n Dependencies of the node\nnumDependencies : size_t\n Number of dependencies\nevent : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Event for the node\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\nphGraphNode : :py:obj:`~.cudaGraphNode_t`\n Returns newly created node\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddNode`, :py:obj:`~.cudaGraphAddEventWaitNode`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent`, :py:obj:`~.cudaGraphCreate`, :py:obj:`~.cudaGraphDestroyNode`, :py:obj:`~.cudaGraphAddChildGraphNode`, :py:obj:`~.cudaGraphAddEmptyNode`, :py:obj:`~.cudaGraphAddKernelNode`, :py:obj:`~.cudaGraphAddMemcpyNode`, :py:obj:`~.cudaGraphAddMemsetNode`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_413cudaGraphAddEventRecordNode = {"cudaGraphAddEventRecordNode", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_413cudaGraphAddEventRecordNode, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_412cudaGraphAddEventRecordNode}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_413cudaGraphAddEventRecordNode(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_graph = 0; PyObject *__pyx_v_pDependencies = 0; size_t __pyx_v_numDependencies; PyObject *__pyx_v_event = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[4] 
= {0,0,0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphAddEventRecordNode (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_graph_2,&__pyx_mstate_global->__pyx_n_u_pDependencies,&__pyx_mstate_global->__pyx_n_u_numDependencies,&__pyx_mstate_global->__pyx_n_u_event_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26629, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 4: values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26629, __pyx_L3_error) CYTHON_FALLTHROUGH; case 3: values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26629, __pyx_L3_error) CYTHON_FALLTHROUGH; case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26629, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26629, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphAddEventRecordNode", 0) < (0)) __PYX_ERR(0, 26629, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 4; i++) { if (unlikely(!values[i])) { 
__Pyx_RaiseArgtupleInvalid("cudaGraphAddEventRecordNode", 1, 4, 4, i); __PYX_ERR(0, 26629, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 4)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26629, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26629, __pyx_L3_error) values[2] = __Pyx_ArgRef_FASTCALL(__pyx_args, 2); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[2])) __PYX_ERR(0, 26629, __pyx_L3_error) values[3] = __Pyx_ArgRef_FASTCALL(__pyx_args, 3); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[3])) __PYX_ERR(0, 26629, __pyx_L3_error) } __pyx_v_graph = values[0]; __pyx_v_pDependencies = values[1]; __pyx_v_numDependencies = __Pyx_PyLong_As_size_t(values[2]); if (unlikely((__pyx_v_numDependencies == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 26630, __pyx_L3_error) __pyx_v_event = values[3]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphAddEventRecordNode", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 26629, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddEventRecordNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_412cudaGraphAddEventRecordNode(__pyx_self, __pyx_v_graph, __pyx_v_pDependencies, __pyx_v_numDependencies, __pyx_v_event); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_gb_4cuda_8bindings_7runtime_27cudaGraphAddEventRecordNode_2generator101(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */ /* "cuda/bindings/runtime.pyx":26676 * cyevent = pevent * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_27cudaGraphAddEventRecordNode_genexpr(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_genexpr_arg_0) { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_101_genexpr *__pyx_cur_scope; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("genexpr", 0); __pyx_cur_scope = (struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_101_genexpr *)__pyx_tp_new_4cuda_8bindings_7runtime___pyx_scope_struct_101_genexpr(__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime___pyx_scope_struct_101_genexpr, __pyx_mstate_global->__pyx_empty_tuple, NULL); if (unlikely(!__pyx_cur_scope)) { __pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_101_genexpr *)Py_None); __Pyx_INCREF(Py_None); __PYX_ERR(0, 26676, __pyx_L1_error) } else { __Pyx_GOTREF((PyObject *)__pyx_cur_scope); } __pyx_cur_scope->__pyx_genexpr_arg_0 = __pyx_genexpr_arg_0; __Pyx_INCREF(__pyx_cur_scope->__pyx_genexpr_arg_0); __Pyx_GIVEREF(__pyx_cur_scope->__pyx_genexpr_arg_0); { __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_4cuda_8bindings_7runtime_27cudaGraphAddEventRecordNode_2generator101, ((PyObject 
*)__pyx_mstate_global->__pyx_codeobj_tab[101]), (PyObject *) __pyx_cur_scope, __pyx_mstate_global->__pyx_n_u_genexpr, __pyx_mstate_global->__pyx_n_u_cudaGraphAddEventRecordNode_loca, __pyx_mstate_global->__pyx_n_u_cuda_bindings_runtime); if (unlikely(!gen)) __PYX_ERR(0, 26676, __pyx_L1_error) __Pyx_DECREF(__pyx_cur_scope); __Pyx_RefNannyFinishContext(); return (PyObject *) gen; } /* function exit code */ __pyx_L1_error:; __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddEventRecordNode.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_DECREF((PyObject *)__pyx_cur_scope); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_gb_4cuda_8bindings_7runtime_27cudaGraphAddEventRecordNode_2generator101(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */ { struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_101_genexpr *__pyx_cur_scope = ((struct __pyx_obj_4cuda_8bindings_7runtime___pyx_scope_struct_101_genexpr *)__pyx_generator->closure); PyObject *__pyx_r = NULL; PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *(*__pyx_t_3)(PyObject *); PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("genexpr", 0); switch (__pyx_generator->resume_label) { case 0: goto __pyx_L3_first_run; default: /* CPython raises the right error here */ __Pyx_RefNannyFinishContext(); return NULL; } __pyx_L3_first_run:; if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 26676, __pyx_L1_error) if (unlikely(!__pyx_cur_scope->__pyx_genexpr_arg_0)) { __Pyx_RaiseUnboundLocalError(".0"); __PYX_ERR(0, 26676, __pyx_L1_error) } if (likely(PyList_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) || PyTuple_CheckExact(__pyx_cur_scope->__pyx_genexpr_arg_0)) { __pyx_t_1 = __pyx_cur_scope->__pyx_genexpr_arg_0; 
__Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; __pyx_t_3 = NULL; } else { __pyx_t_2 = -1; __pyx_t_1 = PyObject_GetIter(__pyx_cur_scope->__pyx_genexpr_arg_0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (CYTHON_COMPILING_IN_LIMITED_API) ? PyIter_Next : __Pyx_PyObject_GetIterNextFunc(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26676, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_3)) { if (likely(PyList_CheckExact(__pyx_t_1))) { { Py_ssize_t __pyx_temp = __Pyx_PyList_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26676, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } __pyx_t_4 = __Pyx_PyList_GetItemRef(__pyx_t_1, __pyx_t_2); ++__pyx_t_2; } else { { Py_ssize_t __pyx_temp = __Pyx_PyTuple_GET_SIZE(__pyx_t_1); #if !CYTHON_ASSUME_SAFE_SIZE if (unlikely((__pyx_temp < 0))) __PYX_ERR(0, 26676, __pyx_L1_error) #endif if (__pyx_t_2 >= __pyx_temp) break; } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = __Pyx_NewRef(PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2)); #else __pyx_t_4 = __Pyx_PySequence_ITEM(__pyx_t_1, __pyx_t_2); #endif ++__pyx_t_2; } if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26676, __pyx_L1_error) } else { __pyx_t_4 = __pyx_t_3(__pyx_t_1); if (unlikely(!__pyx_t_4)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) __PYX_ERR(0, 26676, __pyx_L1_error) PyErr_Clear(); } break; } } __Pyx_GOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v__x); __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v__x, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_6) { } else { __pyx_t_5 = __pyx_t_6; goto __pyx_L7_bool_binop_done; } __pyx_t_6 = __Pyx_TypeCheck(__pyx_cur_scope->__pyx_v__x, 
__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_5 = __pyx_t_6; __pyx_L7_bool_binop_done:; __pyx_t_6 = (!__pyx_t_5); if (__pyx_t_6) { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_False); __pyx_r = Py_False; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L0; } } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_True); __pyx_r = Py_True; goto __pyx_L0; } CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); if (__Pyx_PyErr_Occurred()) { __Pyx_Generator_Replace_StopIteration(0); __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename); } __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); #if !CYTHON_USE_EXC_INFO_STACK __Pyx_Coroutine_ResetAndClearException(__pyx_generator); #endif __pyx_generator->resume_label = -1; __Pyx_Coroutine_clear((PyObject*)__pyx_generator); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26629 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddEventRecordNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, event): * """ Creates an event record node and adds it to a graph. 
*/ static PyObject *__pyx_pf_4cuda_8bindings_7runtime_412cudaGraphAddEventRecordNode(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_graph, PyObject *__pyx_v_pDependencies, size_t __pyx_v_numDependencies, PyObject *__pyx_v_event) { cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; cudaGraph_t __pyx_v_cygraph; PyObject *__pyx_v_pgraph = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *__pyx_v_pGraphNode = 0; cudaGraphNode_t *__pyx_v_cypDependencies; Py_ssize_t __pyx_v_idx; cudaError_t __pyx_v_err; PyObject *__pyx_gb_4cuda_8bindings_7runtime_27cudaGraphAddEventRecordNode_2generator101 = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; cudaError_t __pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphAddEventRecordNode", 0); __Pyx_INCREF(__pyx_v_pDependencies); /* "cuda/bindings/runtime.pyx":26668 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ __pyx_t_1 = (__pyx_v_event == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26669 * cdef cyruntime.cudaEvent_t cyevent * if event is None: * pevent = 0 # <<<<<<<<<<<<<< * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pevent = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26668 * """ * cdef cyruntime.cudaEvent_t cyevent * if event is None: # <<<<<<<<<<<<<< * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): */ goto __pyx_L3; } /* 
"cuda/bindings/runtime.pyx":26670 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_event, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUevent); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26671 * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): * pevent = int(event) # <<<<<<<<<<<<<< * else: * pevent = int(cudaEvent_t(event)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_event); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26671, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pevent = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26670 * if event is None: * pevent = 0 * elif isinstance(event, (cudaEvent_t,driver.CUevent)): # <<<<<<<<<<<<<< * pevent = int(event) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26673 * pevent = int(event) * else: * pevent = int(cudaEvent_t(event)) # <<<<<<<<<<<<<< * cyevent = pevent * pDependencies = [] if pDependencies is None else pDependencies */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_event}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26673, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject 
*)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26673, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pevent = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":26674 * else: * pevent = int(cudaEvent_t(event)) * cyevent = pevent # <<<<<<<<<<<<<< * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pevent); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26674, __pyx_L1_error) __pyx_v_cyevent = ((cudaEvent_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26675 * pevent = int(cudaEvent_t(event)) * cyevent = pevent * pDependencies = [] if pDependencies is None else pDependencies # <<<<<<<<<<<<<< * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") */ __pyx_t_1 = (__pyx_v_pDependencies == Py_None); if (__pyx_t_1) { __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __pyx_t_3; __pyx_t_3 = 0; } else { __Pyx_INCREF(__pyx_v_pDependencies); __pyx_t_5 = __pyx_v_pDependencies; } __Pyx_DECREF_SET(__pyx_v_pDependencies, __pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26676 * cyevent = pevent * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # <<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or 
list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ __pyx_t_5 = __pyx_pf_4cuda_8bindings_7runtime_27cudaGraphAddEventRecordNode_genexpr(NULL, __pyx_v_pDependencies); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_Generator_GetInlinedResult(__pyx_t_5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(0, 26676, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = (!__pyx_t_1); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":26677 * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraph_t cygraph * if graph is None: */ __pyx_t_5 = NULL; __Pyx_INCREF(__pyx_builtin_TypeError); __pyx_t_4 = __pyx_builtin_TypeError; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_mstate_global->__pyx_kp_u_Argument_pDependencies_is_not_in}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(0, 26677, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26676 * cyevent = pevent * pDependencies = [] if pDependencies is None else pDependencies * if not all(isinstance(_x, (cudaGraphNode_t,driver.CUgraphNode)) for _x in pDependencies): # 
<<<<<<<<<<<<<< * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph */ } /* "cuda/bindings/runtime.pyx":26679 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ __pyx_t_2 = (__pyx_v_graph == Py_None); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26680 * cdef cyruntime.cudaGraph_t cygraph * if graph is None: * pgraph = 0 # <<<<<<<<<<<<<< * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pgraph = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26679 * raise TypeError("Argument 'pDependencies' is not instance of type (expected tuple[cyruntime.cudaGraphNode_t,driver.CUgraphNode] or list[cyruntime.cudaGraphNode_t,driver.CUgraphNode]") * cdef cyruntime.cudaGraph_t cygraph * if graph is None: # <<<<<<<<<<<<<< * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":26681 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); if (!__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L8_bool_binop_done; } __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_graph, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraph); __pyx_t_2 = __pyx_t_1; __pyx_L8_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26682 * pgraph = 0 * elif isinstance(graph, 
(cudaGraph_t,driver.CUgraph)): * pgraph = int(graph) # <<<<<<<<<<<<<< * else: * pgraph = int(cudaGraph_t(graph)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_graph); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pgraph = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26681 * if graph is None: * pgraph = 0 * elif isinstance(graph, (cudaGraph_t,driver.CUgraph)): # <<<<<<<<<<<<<< * pgraph = int(graph) * else: */ goto __pyx_L7; } /* "cuda/bindings/runtime.pyx":26684 * pgraph = int(graph) * else: * pgraph = int(cudaGraph_t(graph)) # <<<<<<<<<<<<<< * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraph_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_graph}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26684, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pgraph = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L7:; /* "cuda/bindings/runtime.pyx":26685 * else: * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph # <<<<<<<<<<<<<< * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pgraph); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26685, __pyx_L1_error) 
__pyx_v_cygraph = ((cudaGraph_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26686 * pgraph = int(cudaGraph_t(graph)) * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() # <<<<<<<<<<<<<< * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26686, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_pGraphNode = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26687 * cygraph = pgraph * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL # <<<<<<<<<<<<<< * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) */ __pyx_v_cypDependencies = NULL; /* "cuda/bindings/runtime.pyx":26688 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26688, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 > 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26689 * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: 
* cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) # <<<<<<<<<<<<<< * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26689, __pyx_L1_error) __pyx_v_cypDependencies = ((cudaGraphNode_t *)calloc(__pyx_t_8, (sizeof(cudaGraphNode_t)))); /* "cuda/bindings/runtime.pyx":26690 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ __pyx_t_2 = (__pyx_v_cypDependencies == NULL); if (unlikely(__pyx_t_2)) { /* "cuda/bindings/runtime.pyx":26691 * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) # <<<<<<<<<<<<<< * else: * for idx in range(len(pDependencies)): */ __pyx_t_4 = NULL; __Pyx_INCREF(__pyx_builtin_MemoryError); __pyx_t_3 = __pyx_builtin_MemoryError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26691, __pyx_L1_error) __pyx_t_9 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_Failed_to_allocate_length_x_size, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26691, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_x_2); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyLong_FromSize_t((sizeof(cudaGraphNode_t))); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_11 = __Pyx_PyObject_Unicode(__pyx_t_9); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_10, __pyx_t_11); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_t_9}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 26691, __pyx_L1_error) /* "cuda/bindings/runtime.pyx":26690 * if len(pDependencies) > 1: * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: # <<<<<<<<<<<<<< * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: */ } /* "cuda/bindings/runtime.pyx":26693 * raise MemoryError('Failed to allocate length x size memory: ' + str(len(pDependencies)) + 'x' + str(sizeof(cyruntime.cudaGraphNode_t))) * else: * for idx in range(len(pDependencies)): # <<<<<<<<<<<<<< * cypDependencies[idx] = 
(pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: */ /*else*/ { __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26693, __pyx_L1_error) __pyx_t_12 = __pyx_t_8; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_idx = __pyx_t_13; /* "cuda/bindings/runtime.pyx":26694 * else: * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] # <<<<<<<<<<<<<< * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr */ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_pDependencies, __pyx_v_idx, Py_ssize_t, 1, PyLong_FromSsize_t, 0, 1, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); (__pyx_v_cypDependencies[__pyx_v_idx]) = ((cudaGraphNode_t)(((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_5)->__pyx_base._pvt_ptr[0])); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } /* "cuda/bindings/runtime.pyx":26688 * cdef cudaGraphNode_t pGraphNode = cudaGraphNode_t() * cdef cyruntime.cudaGraphNode_t* cypDependencies = NULL * if len(pDependencies) > 1: # <<<<<<<<<<<<<< * cypDependencies = calloc(len(pDependencies), sizeof(cyruntime.cudaGraphNode_t)) * if cypDependencies is NULL: */ goto __pyx_L10; } /* "cuda/bindings/runtime.pyx":26695 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26695, __pyx_L1_error) __pyx_t_2 = (__pyx_t_8 == 1); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26696 * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: * 
cypDependencies = (pDependencies[0])._pvt_ptr # <<<<<<<<<<<<<< * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: */ __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_pDependencies, 0, long, 1, __Pyx_PyLong_From_long, 0, 0, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_v_cypDependencies = ((cudaGraphNode_t *)((struct __pyx_obj_4cuda_8bindings_7runtime_cudaGraphNode_t *)__pyx_t_5)->__pyx_base._pvt_ptr); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26695 * for idx in range(len(pDependencies)): * cypDependencies[idx] = (pDependencies[idx])._pvt_ptr[0] * elif len(pDependencies) == 1: # <<<<<<<<<<<<<< * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) */ } __pyx_L10:; /* "cuda/bindings/runtime.pyx":26697 * elif len(pDependencies) == 1: * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphAddEventRecordNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cyevent) */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26697, __pyx_L1_error) __pyx_t_2 = (__pyx_v_numDependencies > ((size_t)__pyx_t_8)); if (unlikely(__pyx_t_2)) { __pyx_t_3 = NULL; __Pyx_INCREF(__pyx_builtin_RuntimeError); __pyx_t_9 = __pyx_builtin_RuntimeError; __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26697, __pyx_L1_error) __pyx_t_4 = PyLong_FromSsize_t(__pyx_t_8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = 
__Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_Concat(__pyx_mstate_global->__pyx_kp_u_List_is_too_small, __pyx_t_11); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __pyx_t_11 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_4, __pyx_mstate_global->__pyx_kp_u__6); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyLong_FromSize_t(__pyx_v_numDependencies); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_10 = __Pyx_PyObject_Unicode(__pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 26697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyUnicode_ConcatInPlace(__pyx_t_11, __pyx_t_10); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_4}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26697, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); } __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(0, 26697, __pyx_L1_error) } /* "cuda/bindings/runtime.pyx":26698 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: # <<<<<<<<<<<<<< * err = 
cyruntime.cudaGraphAddEventRecordNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cyevent) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26699 * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: * err = cyruntime.cudaGraphAddEventRecordNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cyevent) # <<<<<<<<<<<<<< * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) */ __pyx_t_14 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphAddEventRecordNode(((cudaGraphNode_t *)__pyx_v_pGraphNode->__pyx_base._pvt_ptr), __pyx_v_cygraph, __pyx_v_cypDependencies, __pyx_v_numDependencies, __pyx_v_cyevent); if (unlikely(__pyx_t_14 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26699, __pyx_L16_error) __pyx_v_err = __pyx_t_14; } /* "cuda/bindings/runtime.pyx":26698 * cypDependencies = (pDependencies[0])._pvt_ptr * if numDependencies > len(pDependencies): raise RuntimeError("List is too small: " + str(len(pDependencies)) + " < " + str(numDependencies)) * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphAddEventRecordNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cyevent) * if len(pDependencies) > 1 and cypDependencies is not NULL: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L17; } __pyx_L16_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L17:; } } /* "cuda/bindings/runtime.pyx":26700 * with nogil: * err = cyruntime.cudaGraphAddEventRecordNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cyevent) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != 
cyruntime.cudaSuccess: */ __pyx_t_8 = PyObject_Length(__pyx_v_pDependencies); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(0, 26700, __pyx_L1_error) __pyx_t_1 = (__pyx_t_8 > 1); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L19_bool_binop_done; } __pyx_t_1 = (__pyx_v_cypDependencies != NULL); __pyx_t_2 = __pyx_t_1; __pyx_L19_bool_binop_done:; if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26701 * err = cyruntime.cudaGraphAddEventRecordNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cyevent) * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ free(__pyx_v_cypDependencies); /* "cuda/bindings/runtime.pyx":26700 * with nogil: * err = cyruntime.cudaGraphAddEventRecordNode(pGraphNode._pvt_ptr, cygraph, cypDependencies, numDependencies, cyevent) * if len(pDependencies) > 1 and cypDependencies is not NULL: # <<<<<<<<<<<<<< * free(cypDependencies) * if err != cyruntime.cudaSuccess: */ } /* "cuda/bindings/runtime.pyx":26702 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ __pyx_t_2 = (__pyx_v_err != cudaSuccess); if (__pyx_t_2) { /* "cuda/bindings/runtime.pyx":26703 * free(cypDependencies) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], pGraphNode) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_4 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_9); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 26703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_4); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_4) != (0)) __PYX_ERR(0, 26703, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_9, 1, Py_None) != (0)) __PYX_ERR(0, 26703, __pyx_L1_error); __pyx_t_4 = 0; __pyx_r = __pyx_t_9; __pyx_t_9 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26702 * if len(pDependencies) > 1 and cypDependencies is not NULL: * free(cypDependencies) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) */ } /* "cuda/bindings/runtime.pyx":26704 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], pGraphNode) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 26704, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26704, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_9, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26704, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26704, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26704, __pyx_L1_error); __Pyx_INCREF((PyObject *)__pyx_v_pGraphNode); __Pyx_GIVEREF((PyObject *)__pyx_v_pGraphNode); if 
(__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, ((PyObject *)__pyx_v_pGraphNode)) != (0)) __PYX_ERR(0, 26704, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26629 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphAddEventRecordNode(graph, pDependencies : Optional[tuple[cudaGraphNode_t] | list[cudaGraphNode_t]], size_t numDependencies, event): * """ Creates an event record node and adds it to a graph. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphAddEventRecordNode", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pevent); __Pyx_XDECREF(__pyx_v_pgraph); __Pyx_XDECREF((PyObject *)__pyx_v_pGraphNode); __Pyx_XDECREF(__pyx_gb_4cuda_8bindings_7runtime_27cudaGraphAddEventRecordNode_2generator101); __Pyx_XDECREF(__pyx_v_pDependencies); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26706 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphEventRecordNodeGetEvent(node): * """ Returns the event associated with an event record node. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_415cudaGraphEventRecordNodeGetEvent(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_414cudaGraphEventRecordNodeGetEvent, "cudaGraphEventRecordNodeGetEvent(node)\n\nReturns the event associated with an event record node.\n\nReturns the event of event record node `hNode` in `event_out`.\n\nParameters\n----------\nhNode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to get the event for\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\nevent_out : :py:obj:`~.cudaEvent_t`\n Pointer to return the event\n\nSee Also\n--------\n:py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaGraphEventRecordNodeSetEvent`, :py:obj:`~.cudaGraphEventWaitNodeGetEvent`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_415cudaGraphEventRecordNodeGetEvent = {"cudaGraphEventRecordNodeGetEvent", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_415cudaGraphEventRecordNodeGetEvent, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_414cudaGraphEventRecordNodeGetEvent}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_415cudaGraphEventRecordNodeGetEvent(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_node = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[1] = {0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphEventRecordNodeGetEvent (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26706, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26706, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphEventRecordNodeGetEvent", 0) < (0)) __PYX_ERR(0, 26706, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 1; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphEventRecordNodeGetEvent", 1, 1, 1, i); __PYX_ERR(0, 26706, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 1)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26706, __pyx_L3_error) } __pyx_v_node = values[0]; } goto __pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphEventRecordNodeGetEvent", 1, 1, 1, __pyx_nargs); __PYX_ERR(0, 26706, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } 
__Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphEventRecordNodeGetEvent", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_414cudaGraphEventRecordNodeGetEvent(__pyx_self, __pyx_v_node); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_414cudaGraphEventRecordNodeGetEvent(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node) { cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pnode = NULL; struct __pyx_obj_4cuda_8bindings_7runtime_cudaEvent_t *__pyx_v_event_out = 0; cudaError_t __pyx_v_err; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; size_t __pyx_t_6; __pyx_t_4cuda_8bindings_7runtime_void_ptr __pyx_t_7; cudaError_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("cudaGraphEventRecordNodeGetEvent", 0); /* "cuda/bindings/runtime.pyx":26729 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): */ __pyx_t_1 = (__pyx_v_node == Py_None); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26730 * cdef cyruntime.cudaGraphNode_t cynode * if node is None: * pnode = 0 # <<<<<<<<<<<<<< * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) */ __Pyx_INCREF(__pyx_mstate_global->__pyx_int_0); __pyx_v_pnode = __pyx_mstate_global->__pyx_int_0; /* "cuda/bindings/runtime.pyx":26729 * """ * cdef cyruntime.cudaGraphNode_t cynode * if node is None: # <<<<<<<<<<<<<< * pnode = 0 * elif isinstance(node, 
(cudaGraphNode_t,driver.CUgraphNode)): */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26731 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = __Pyx_TypeCheck(__pyx_v_node, __pyx_mstate_global->__pyx_ptype_4cuda_8bindings_6driver_CUgraphNode); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26732 * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): * pnode = int(node) # <<<<<<<<<<<<<< * else: * pnode = int(cudaGraphNode_t(node)) */ __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_v_node); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26732, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_pnode = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "cuda/bindings/runtime.pyx":26731 * if node is None: * pnode = 0 * elif isinstance(node, (cudaGraphNode_t,driver.CUgraphNode)): # <<<<<<<<<<<<<< * pnode = int(node) * else: */ goto __pyx_L3; } /* "cuda/bindings/runtime.pyx":26734 * pnode = int(node) * else: * pnode = int(cudaGraphNode_t(node)) # <<<<<<<<<<<<<< * cynode = pnode * cdef cudaEvent_t event_out = cudaEvent_t() */ /*else*/ { __pyx_t_4 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_5 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaGraphNode_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_node}; __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+__pyx_t_6, (2-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26734, __pyx_L1_error) __Pyx_GOTREF((PyObject 
*)__pyx_t_3); } __pyx_t_5 = __Pyx_PyNumber_Int(((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF((PyObject *)__pyx_t_3); __pyx_t_3 = 0; __pyx_v_pnode = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; } __pyx_L3:; /* "cuda/bindings/runtime.pyx":26735 * else: * pnode = int(cudaGraphNode_t(node)) * cynode = pnode # <<<<<<<<<<<<<< * cdef cudaEvent_t event_out = cudaEvent_t() * with nogil: */ __pyx_t_7 = __Pyx_PyLong_As_unsigned_PY_LONG_LONG(__pyx_v_pnode); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 26735, __pyx_L1_error) __pyx_v_cynode = ((cudaGraphNode_t)((__pyx_t_4cuda_8bindings_7runtime_void_ptr)__pyx_t_7)); /* "cuda/bindings/runtime.pyx":26736 * pnode = int(cudaGraphNode_t(node)) * cynode = pnode * cdef cudaEvent_t event_out = cudaEvent_t() # <<<<<<<<<<<<<< * with nogil: * err = cyruntime.cudaGraphEventRecordNodeGetEvent(cynode, event_out._pvt_ptr) */ __pyx_t_3 = NULL; __Pyx_INCREF((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_4 = ((PyObject *)__pyx_mstate_global->__pyx_ptype_4cuda_8bindings_7runtime_cudaEvent_t); __pyx_t_6 = 1; { PyObject *__pyx_callargs[2] = {__pyx_t_3, NULL}; __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+__pyx_t_6, (1-__pyx_t_6) | (__pyx_t_6*__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)); __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26736, __pyx_L1_error) __Pyx_GOTREF((PyObject *)__pyx_t_5); } __pyx_v_event_out = ((struct __pyx_obj_4cuda_8bindings_7runtime_cudaEvent_t *)__pyx_t_5); __pyx_t_5 = 0; /* "cuda/bindings/runtime.pyx":26737 * cynode = pnode * cdef cudaEvent_t event_out = cudaEvent_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphEventRecordNodeGetEvent(cynode, event_out._pvt_ptr) * if err != cyruntime.cudaSuccess: */ { PyThreadState *_save; _save = NULL; Py_UNBLOCK_THREADS 
__Pyx_FastGIL_Remember(); /*try:*/ { /* "cuda/bindings/runtime.pyx":26738 * cdef cudaEvent_t event_out = cudaEvent_t() * with nogil: * err = cyruntime.cudaGraphEventRecordNodeGetEvent(cynode, event_out._pvt_ptr) # <<<<<<<<<<<<<< * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) */ __pyx_t_8 = __pyx_f_4cuda_8bindings_9cyruntime_cudaGraphEventRecordNodeGetEvent(__pyx_v_cynode, ((cudaEvent_t *)__pyx_v_event_out->__pyx_base._pvt_ptr)); if (unlikely(__pyx_t_8 == ((cudaError_t)cudaErrorCallRequiresNewerDriver) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 26738, __pyx_L7_error) __pyx_v_err = __pyx_t_8; } /* "cuda/bindings/runtime.pyx":26737 * cynode = pnode * cdef cudaEvent_t event_out = cudaEvent_t() * with nogil: # <<<<<<<<<<<<<< * err = cyruntime.cudaGraphEventRecordNodeGetEvent(cynode, event_out._pvt_ptr) * if err != cyruntime.cudaSuccess: */ /*finally:*/ { /*normal exit:*/{ __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L8; } __pyx_L7_error: { __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS goto __pyx_L1_error; } __pyx_L8:; } } /* "cuda/bindings/runtime.pyx":26739 * with nogil: * err = cyruntime.cudaGraphEventRecordNodeGetEvent(cynode, event_out._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event_out) */ __pyx_t_1 = (__pyx_v_err != cudaSuccess); if (__pyx_t_1) { /* "cuda/bindings/runtime.pyx":26740 * err = cyruntime.cudaGraphEventRecordNodeGetEvent(cynode, event_out._pvt_ptr) * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], event_out) * */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26740, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_4 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26740, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
__pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26740, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26740, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3) != (0)) __PYX_ERR(0, 26740, __pyx_L1_error); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None) != (0)) __PYX_ERR(0, 26740, __pyx_L1_error); __pyx_t_3 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26739 * with nogil: * err = cyruntime.cudaGraphEventRecordNodeGetEvent(cynode, event_out._pvt_ptr) * if err != cyruntime.cudaSuccess: # <<<<<<<<<<<<<< * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event_out) */ } /* "cuda/bindings/runtime.pyx":26741 * if err != cyruntime.cudaSuccess: * return (_dict_cudaError_t[err], None) * return (_dict_cudaError_t[err], event_out) # <<<<<<<<<<<<<< * * @cython.embedsignature(True) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_mstate_global->__pyx_n_u_dict_cudaError_t); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyLong_From_enum__cudaError(__pyx_v_err); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26741, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_5); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_5) != (0)) __PYX_ERR(0, 26741, __pyx_L1_error); __Pyx_INCREF((PyObject 
*)__pyx_v_event_out); __Pyx_GIVEREF((PyObject *)__pyx_v_event_out); if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, ((PyObject *)__pyx_v_event_out)) != (0)) __PYX_ERR(0, 26741, __pyx_L1_error); __pyx_t_5 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "cuda/bindings/runtime.pyx":26706 * return (_dict_cudaError_t[err], pGraphNode) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphEventRecordNodeGetEvent(node): * """ Returns the event associated with an event record node. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphEventRecordNodeGetEvent", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_pnode); __Pyx_XDECREF((PyObject *)__pyx_v_event_out); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "cuda/bindings/runtime.pyx":26743 * return (_dict_cudaError_t[err], event_out) * * @cython.embedsignature(True) # <<<<<<<<<<<<<< * def cudaGraphEventRecordNodeSetEvent(node, event): * """ Sets an event record node's event. 
*/ /* Python wrapper */ static PyObject *__pyx_pw_4cuda_8bindings_7runtime_417cudaGraphEventRecordNodeSetEvent(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ); /*proto*/ PyDoc_STRVAR(__pyx_doc_4cuda_8bindings_7runtime_416cudaGraphEventRecordNodeSetEvent, "cudaGraphEventRecordNodeSetEvent(node, event)\n\nSets an event record node's event.\n\nSets the event of event record node `hNode` to `event`.\n\nParameters\n----------\nhNode : :py:obj:`~.CUgraphNode` or :py:obj:`~.cudaGraphNode_t`\n Node to set the event for\nevent : :py:obj:`~.CUevent` or :py:obj:`~.cudaEvent_t`\n Event to use\n\nReturns\n-------\ncudaError_t\n :py:obj:`~.cudaSuccess`, :py:obj:`~.cudaErrorInvalidValue`\n\nSee Also\n--------\n:py:obj:`~.cudaGraphNodeSetParams`, :py:obj:`~.cudaGraphAddEventRecordNode`, :py:obj:`~.cudaGraphEventRecordNodeGetEvent`, :py:obj:`~.cudaGraphEventWaitNodeSetEvent`, :py:obj:`~.cudaEventRecordWithFlags`, :py:obj:`~.cudaStreamWaitEvent`"); static PyMethodDef __pyx_mdef_4cuda_8bindings_7runtime_417cudaGraphEventRecordNodeSetEvent = {"cudaGraphEventRecordNodeSetEvent", (PyCFunction)(void(*)(void))(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_4cuda_8bindings_7runtime_417cudaGraphEventRecordNodeSetEvent, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_4cuda_8bindings_7runtime_416cudaGraphEventRecordNodeSetEvent}; static PyObject *__pyx_pw_4cuda_8bindings_7runtime_417cudaGraphEventRecordNodeSetEvent(PyObject *__pyx_self, #if CYTHON_METH_FASTCALL PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds #else PyObject *__pyx_args, PyObject *__pyx_kwds #endif ) { PyObject *__pyx_v_node = 0; PyObject *__pyx_v_event = 0; #if !CYTHON_METH_FASTCALL CYTHON_UNUSED Py_ssize_t __pyx_nargs; #endif CYTHON_UNUSED PyObject *const *__pyx_kwvalues; PyObject* values[2] = {0,0}; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno 
= 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("cudaGraphEventRecordNodeSetEvent (wrapper)", 0); #if !CYTHON_METH_FASTCALL #if CYTHON_ASSUME_SAFE_SIZE __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); #else __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL; #endif #endif __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); { PyObject ** const __pyx_pyargnames[] = {&__pyx_mstate_global->__pyx_n_u_node_2,&__pyx_mstate_global->__pyx_n_u_event_2,0}; const Py_ssize_t __pyx_kwds_len = (__pyx_kwds) ? __Pyx_NumKwargs_FASTCALL(__pyx_kwds) : 0; if (unlikely(__pyx_kwds_len) < 0) __PYX_ERR(0, 26743, __pyx_L3_error) if (__pyx_kwds_len > 0) { switch (__pyx_nargs) { case 2: values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26743, __pyx_L3_error) CYTHON_FALLTHROUGH; case 1: values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26743, __pyx_L3_error) CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } const Py_ssize_t kwd_pos_args = __pyx_nargs; if (__Pyx_ParseKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values, kwd_pos_args, __pyx_kwds_len, "cudaGraphEventRecordNodeSetEvent", 0) < (0)) __PYX_ERR(0, 26743, __pyx_L3_error) for (Py_ssize_t i = __pyx_nargs; i < 2; i++) { if (unlikely(!values[i])) { __Pyx_RaiseArgtupleInvalid("cudaGraphEventRecordNodeSetEvent", 1, 2, 2, i); __PYX_ERR(0, 26743, __pyx_L3_error) } } } else if (unlikely(__pyx_nargs != 2)) { goto __pyx_L5_argtuple_error; } else { values[0] = __Pyx_ArgRef_FASTCALL(__pyx_args, 0); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[0])) __PYX_ERR(0, 26743, __pyx_L3_error) values[1] = __Pyx_ArgRef_FASTCALL(__pyx_args, 1); if (!CYTHON_ASSUME_SAFE_MACROS && unlikely(!values[1])) __PYX_ERR(0, 26743, __pyx_L3_error) } __pyx_v_node = values[0]; __pyx_v_event = values[1]; } goto 
__pyx_L6_skip; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("cudaGraphEventRecordNodeSetEvent", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 26743, __pyx_L3_error) __pyx_L6_skip:; goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_AddTraceback("cuda.bindings.runtime.cudaGraphEventRecordNodeSetEvent", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_4cuda_8bindings_7runtime_416cudaGraphEventRecordNodeSetEvent(__pyx_self, __pyx_v_node, __pyx_v_event); /* function exit code */ for (Py_ssize_t __pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { Py_XDECREF(values[__pyx_temp]); } __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_4cuda_8bindings_7runtime_416cudaGraphEventRecordNodeSetEvent(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_node, PyObject *__pyx_v_event) { cudaEvent_t __pyx_v_cyevent; PyObject *__pyx_v_pevent = NULL; cudaGraphNode_t __pyx_v_cynode; PyObject *__pyx_v_pno