From 375835bbefb9c4e9998c15dbfca96e0553d7cc7d Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 20 May 2023 20:38:26 -0500 Subject: [PATCH 01/40] Update to SuiteSparse:GraphBLAS 8.0.0 --- .github/workflows/test_and_build.yml | 8 +- docs/env.yml | 2 +- graphblas/core/base.py | 6 +- graphblas/core/descriptor.py | 1 + graphblas/core/expr.py | 2 +- graphblas/core/matrix.py | 4 +- graphblas/core/ss/config.py | 56 +++++++++++--- graphblas/core/ss/context.py | 111 +++++++++++++++++++++++++++ graphblas/core/ss/descriptor.py | 13 ++-- graphblas/core/vector.py | 7 +- graphblas/ss/__init__.py | 3 +- graphblas/ss/_core.py | 67 +++++++++++++--- graphblas/tests/test_matrix.py | 2 +- pyproject.toml | 4 +- scripts/check_versions.sh | 4 +- 15 files changed, 243 insertions(+), 47 deletions(-) create mode 100644 graphblas/core/ss/context.py diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index d129ef26f..537ec16cd 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -134,7 +134,7 @@ jobs: 1 1 1 - 1 + 10000000 - name: Setup mamba uses: conda-incubator/setup-miniconda@v2 id: setup_mamba @@ -204,13 +204,13 @@ jobs: # But, it's still useful for us to test with different versions! psg="" if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=8.0.0.0", ""]))') psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", ""]))') elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions - psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==8.0.0.0", ""]))') else psgver="" fi @@ -258,7 +258,7 @@ jobs: ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7"' || '' }} \ ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ - ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4.0"' || '' }} \ + ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=8.0.0"' || '' }} \ ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} - name: Build extension module run: | diff --git a/docs/env.yml b/docs/env.yml index c0c4c8999..631f770f7 100644 --- a/docs/env.yml +++ b/docs/env.yml @@ -8,7 +8,7 @@ dependencies: # python-graphblas dependencies - donfig - numba - - python-suitesparse-graphblas>=7.4.0.0 + - python-suitesparse-graphblas>=8.0.0.0 - pyyaml # extra dependencies - matplotlib diff --git a/graphblas/core/base.py b/graphblas/core/base.py index a4e48b612..ac94abe10 100644 --- a/graphblas/core/base.py +++ b/graphblas/core/base.py @@ -348,7 +348,7 @@ def _update(self, expr, mask=None, accum=None, replace=False, input_mask=None, * return if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) self.value = expr return @@ -371,7 +371,7 @@ def _update(self, expr, mask=None, accum=None, replace=False, input_mask=None, * else: if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) self.value = expr return 
else: @@ -571,7 +571,7 @@ def _new(self, dtype, mask, name, is_cscalar=None, **opts): ): if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 if self._is_scalar and self._value._is_cscalar != is_cscalar: return self._value.dup(is_cscalar=is_cscalar, name=name) rv = self._value diff --git a/graphblas/core/descriptor.py b/graphblas/core/descriptor.py index 1e195e3fe..9ba3ae466 100644 --- a/graphblas/core/descriptor.py +++ b/graphblas/core/descriptor.py @@ -26,6 +26,7 @@ def __init__( self.mask_structure = mask_structure self.transpose_first = transpose_first self.transpose_second = transpose_second + self._context = None # Used by SuiteSparse:GraphBLAS @property def _carg(self): diff --git a/graphblas/core/expr.py b/graphblas/core/expr.py index 48839bcff..03fb30ef1 100644 --- a/graphblas/core/expr.py +++ b/graphblas/core/expr.py @@ -421,7 +421,7 @@ def _setitem(self, resolved_indexes, obj, *, is_submask): # Fast path using assignElement if self.opts: # Ignore opts for now - descriptor_lookup(**self.opts) + desc = descriptor_lookup(**self.opts) # noqa: F841 self.parent._assign_element(resolved_indexes, obj) else: mask = self.kwargs.get("mask") diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 2542ad00e..73646f901 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -665,7 +665,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): else: if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 new_mat = ffi_new("GrB_Matrix*") rv = Matrix._from_obj(new_mat, self.dtype, self._nrows, self._ncols, name=name) call("GrB_Matrix_dup", [_Pointer(rv), self]) @@ -2707,7 +2707,7 @@ def _extract_element( result = Scalar(dtype, is_cscalar=is_cscalar, name=name) if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 if is_cscalar: dtype_name = "UDT" if dtype._is_udt else dtype.name if ( diff --git a/graphblas/core/ss/config.py b/graphblas/core/ss/config.py index ca91cc198..52ee33478 100644 --- a/graphblas/core/ss/config.py +++ b/graphblas/core/ss/config.py @@ -12,6 +12,9 @@ class BaseConfig(MutableMapping): # Subclasses should redefine these _get_function = None _set_function = None + _context_get_function = "GxB_Context_get" + _context_set_function = "GxB_Context_set" + _context_keys = set() _null_valid = {} _options = {} _defaults = {} @@ -28,7 +31,7 @@ class BaseConfig(MutableMapping): "GxB_Format_Value", } - def __init__(self, parent=None): + def __init__(self, parent=None, context=None): cls = type(self) if not cls._initialized: cls._reverse_enumerations = {} @@ -51,6 +54,7 @@ def __init__(self, parent=None): rd[k] = k cls._initialized = True self._parent = parent + self._context = context def __delitem__(self, key): raise TypeError("Configuration options can't be deleted.") @@ -61,19 +65,27 @@ def __getitem__(self, key): raise KeyError(key) key_obj, ctype = self._options[key] is_bool = ctype == "bool" + if is_context := (key in self._context_keys): + get_function_base = self._context_get_function + else: + get_function_base = self._get_function if ctype in self._int32_ctypes: ctype = "int32_t" - get_function_name = f"{self._get_function}_INT32" + get_function_name = f"{get_function_base}_INT32" elif ctype.startswith("int64_t"): - get_function_name = f"{self._get_function}_INT64" + get_function_name = f"{get_function_base}_INT64" elif ctype.startswith("double"): - get_function_name = 
f"{self._get_function}_FP64" + get_function_name = f"{get_function_base}_FP64" + elif ctype.startswith("char"): + get_function_name = f"{get_function_base}_CHAR" else: # pragma: no cover (sanity) raise ValueError(ctype) get_function = getattr(lib, get_function_name) is_array = "[" in ctype val_ptr = ffi.new(ctype if is_array else f"{ctype}*") - if self._parent is None: + if is_context: + info = get_function(self._context._carg, key_obj, val_ptr) + elif self._parent is None: info = get_function(key_obj, val_ptr) else: info = get_function(self._parent._carg, key_obj, val_ptr) @@ -93,6 +105,8 @@ def __getitem__(self, key): return rv if is_bool: return bool(val_ptr[0]) + if ctype.startswith("char"): + return ffi.string(val_ptr[0]).decode() return val_ptr[0] raise _error_code_lookup[info](f"Failed to get info for {key!r}") # pragma: no cover @@ -103,15 +117,21 @@ def __setitem__(self, key, val): if key in self._read_only: raise ValueError(f"Config option {key!r} is read-only") key_obj, ctype = self._options[key] + if is_context := (key in self._context_keys): + set_function_base = self._context_set_function + else: + set_function_base = self._set_function if ctype in self._int32_ctypes: ctype = "int32_t" - set_function_name = f"{self._set_function}_INT32" + set_function_name = f"{set_function_base}_INT32" elif ctype == "double": - set_function_name = f"{self._set_function}_FP64" + set_function_name = f"{set_function_base}_FP64" elif ctype.startswith("int64_t["): - set_function_name = f"{self._set_function}_INT64_ARRAY" + set_function_name = f"{set_function_base}_INT64_ARRAY" elif ctype.startswith("double["): - set_function_name = f"{self._set_function}_FP64_ARRAY" + set_function_name = f"{set_function_base}_FP64_ARRAY" + elif ctype.startswith("char"): + set_function_name = f"{set_function_base}_CHAR" else: # pragma: no cover (sanity) raise ValueError(ctype) set_function = getattr(lib, set_function_name) @@ -154,9 +174,18 @@ def __setitem__(self, key, val): f"expected {size}, got {vals.size}: {val}" ) val_obj = ffi.from_buffer(ctype, vals) + elif ctype.startswith("char"): + val_obj = ffi.new("char[]", val.encode()) else: val_obj = ffi.cast(ctype, val) - if self._parent is None: + if is_context: + if self._context is None: + from .context import Context + + self._context = Context._maybe_new() + self._parent._context = self._context # Set context to descriptor + info = set_function(self._context._carg, key_obj, val_obj) + elif self._parent is None: info = set_function(key_obj, val_obj) else: info = set_function(self._parent._carg, key_obj, val_obj) @@ -174,7 +203,12 @@ def __len__(self): return len(self._options) def __repr__(self): - return "{" + ",\n ".join(f"{k!r}: {v!r}" for k, v in self.items()) + "}" + return ( + type(self).__name__ + + "({" + + ",\n ".join(f"{k!r}: {v!r}" for k, v in self.items()) + + "})" + ) def _ipython_key_completions_(self): # pragma: no cover (ipython) return list(self) diff --git a/graphblas/core/ss/context.py b/graphblas/core/ss/context.py new file mode 100644 index 000000000..808fc1fc0 --- /dev/null +++ b/graphblas/core/ss/context.py @@ -0,0 +1,111 @@ +import threading + +from ...exceptions import check_status, check_status_carg +from .. 
import ffi, lib +from .config import BaseConfig + +ffi_new = ffi.new + + +class ThreadLocal(threading.local): + """Hold the active context for the current thread.""" + + context = None + + +threadlocal = ThreadLocal() + + +class Context(BaseConfig): + _context_keys = {"chunk", "gpu_id", "nthreads"} + _options = { + "chunk": (lib.GxB_CONTEXT_CHUNK, "double"), + "gpu_id": (lib.GxB_CONTEXT_GPU_ID, "int"), + "nthreads": (lib.GxB_CONTEXT_NTHREADS, "int"), + } + _defaults = { + "nthreads": 0, + "chunk": 0, + "gpu_id": -1, # -1 means no GPU (I think) + } + + def __init__(self, engage=True, *, nthreads=None, chunk=None, gpu_id=None): + super().__init__() + if nthreads is not None: + self["nthreads"] = nthreads + if chunk is not None: + self["chunk"] = chunk + if gpu_id is not None: + self["gpu_id"] = gpu_id + if engage: + self.engage() + + def __new__(cls, engage=True, **opts): + self = object.__new__(cls) + self.gb_obj = ffi_new("GxB_Context*") + check_status_carg(lib.GxB_Context_new(self.gb_obj), "Context", self.gb_obj[0]) + return self + + @classmethod + def _from_obj(cls, gb_obj=None): + self = object.__new__(cls) + self.gb_obj = gb_obj + self.__init__(engage=False) + return self + + @classmethod + def _maybe_new(cls): + if threadlocal.context is not None: + return threadlocal.context + self = cls(engage=False) + check_status(lib.GxB_Context_engage(self._carg), self) + # Don't assign to threadlocal.context; instead, let it disengage upon going out of scope + return self + + @property + def _carg(self): + return self.gb_obj[0] + + def __del__(self): + gb_obj = getattr(self, "gb_obj", None) + if gb_obj is not None and lib is not None: # pragma: no branch (safety) + try: + lib.GxB_Context_disengage(gb_obj[0]) + finally: + check_status(lib.GxB_Context_free(gb_obj), self) + + def engage(self): + check_status(lib.GxB_Context_engage(self._carg), self) + threadlocal.context = self + + def disengage(self): + if threadlocal.context is self: + threadlocal.context = None + check_status(lib.GxB_Context_disengage(self._carg), self) + + def __enter__(self): + self.engage() + + def __exit__(self, exc_type, exc, exc_tb): + self.disengage() + + @property + def _context(self): + return self + + @_context.setter + def _context(self, val): + if val is not None: + raise AttributeError("'_context' attribute is read-only") + + +class GlobalContext(Context): + @property + def _carg(self): + return self.gb_obj + + def __del__(self): # pragma: no cover (safety) + pass + + +global_context = GlobalContext._from_obj(lib.GxB_CONTEXT_WORLD) diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py index 2f7d11ffa..66839275c 100644 --- a/graphblas/core/ss/descriptor.py +++ b/graphblas/core/ss/descriptor.py @@ -18,6 +18,7 @@ class _DescriptorConfig(BaseConfig): _get_function = "GxB_Desc_get" _set_function = "GxB_Desc_set" + _context_keys = {"chunk", "gpu_id", "nthreads"} _options = { # GrB "output_replace": (lib.GrB_OUTP, "GrB_Desc_Value"), @@ -26,12 +27,12 @@ class _DescriptorConfig(BaseConfig): "transpose_first": (lib.GrB_INP0, "GrB_Desc_Value"), "transpose_second": (lib.GrB_INP1, "GrB_Desc_Value"), # GxB - "nthreads": (lib.GxB_DESCRIPTOR_NTHREADS, "int"), - "chunk": (lib.GxB_DESCRIPTOR_CHUNK, "double"), + "chunk": (lib.GxB_CONTEXT_CHUNK, "double"), + "gpu_id": (lib.GxB_CONTEXT_GPU_ID, "int"), + "nthreads": (lib.GxB_CONTEXT_NTHREADS, "int"), "axb_method": (lib.GxB_AxB_METHOD, "GrB_Desc_Value"), "sort": (lib.GxB_SORT, "int"), "secure_import": (lib.GxB_IMPORT, "int"), - # "gpu_control": 
(GxB_DESCRIPTOR_GPU_CONTROL, "GrB_Desc_Value"), # Coming soon... } _enumerations = { # GrB @@ -71,10 +72,6 @@ class _DescriptorConfig(BaseConfig): False: False, True: lib.GxB_SORT, }, - # "gpu_control": { # Coming soon... - # "always": lib.GxB_GPU_ALWAYS, - # "never": lib.GxB_GPU_NEVER, - # }, } _defaults = { # GrB @@ -89,8 +86,8 @@ class _DescriptorConfig(BaseConfig): "axb_method": "default", "sort": False, "secure_import": False, + "gpu_id": -1, # -1 means no GPU (I think) } - _count = 0 def __init__(self): gb_obj = ffi_new("GrB_Descriptor*") diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index d2ddee372..102c692bd 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -612,7 +612,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): else: if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 rv = Vector._from_obj(ffi_new("GrB_Vector*"), self.dtype, self._size, name=name) call("GrB_Vector_dup", [_Pointer(rv), self]) return rv @@ -1757,7 +1757,7 @@ def _extract_element( result = Scalar(dtype, is_cscalar=is_cscalar, name=name) if opts: # Ignore opts for now - descriptor_lookup(**opts) + desc = descriptor_lookup(**opts) # noqa: F841 if is_cscalar: dtype_name = "UDT" if dtype._is_udt else dtype.name if ( @@ -2177,6 +2177,9 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): if clear: if dtype is None: dtype = self.dtype + if opts: + # Ignore opts for now + desc = descriptor_lookup(**opts) # noqa: F841 return self.output_type(dtype, *self.shape, name=name) return self.new(dtype, mask=mask, name=name, **opts) diff --git a/graphblas/ss/__init__.py b/graphblas/ss/__init__.py index b36bc1bdc..aa140248d 100644 --- a/graphblas/ss/__init__.py +++ b/graphblas/ss/__init__.py @@ -1 +1,2 @@ -from ._core import about, concat, config, diag +from ..core.ss.context import Context, global_context +from ._core import about, concat, config, diag, jit diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index ec5a89504..1559a1c5b 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -2,9 +2,11 @@ from ..core import ffi, lib from ..core.base import _expect_type +from ..core.descriptor import lookup as descriptor_lookup from ..core.matrix import Matrix, TransposedMatrix from ..core.scalar import _as_scalar from ..core.ss.config import BaseConfig +from ..core.ss.context import global_context from ..core.ss.matrix import _concat_mn from ..core.vector import Vector from ..dtypes import INT64 @@ -52,6 +54,9 @@ def diag(x, k=0, dtype=None, *, name=None, **opts): dtype = x.dtype typ = type(x) if typ is Vector: + if opts: + # Ignore opts for now + desc = descriptor_lookup(**opts) # noqa: F841 size = x._size + abs(k.value) rv = Matrix(dtype, nrows=size, ncols=size, name=name) rv.ss.build_diag(x, k) @@ -120,15 +125,17 @@ class GlobalConfig(BaseConfig): memory_pool : List[int] burble : bool Enable diagnostic printing from SuiteSparse:GraphBLAS - print_1based: bool - gpu_control : str, {"always", "never"} - gpu_chunk : double + print_1based : bool + gpu_id : int + Which GPU to use; default is -1, which means do not run on the GPU. + **GPU support is a work in progress--do not use** Setting values to None restores the default value for most configurations. 
""" _get_function = "GxB_Global_Option_get" _set_function = "GxB_Global_Option_set" + _context_keys = {"chunk", "gpu_id", "nthreads"} _null_valid = {"bitmap_switch"} _options = { # Matrix/Vector format @@ -143,9 +150,20 @@ class GlobalConfig(BaseConfig): # Diagnostics (skipping "printf" and "flush" for now) "burble": (lib.GxB_BURBLE, "bool"), "print_1based": (lib.GxB_PRINT_1BASED, "bool"), + # JIT control + # TODO: should this be in the global config or a separate JIT config? + "jit_c_control": (lib.GxB_JIT_C_CONTROL, "int"), + "jit_use_cmake": (lib.GxB_JIT_USE_CMAKE, "bool"), + "jit_c_compiler_name": (lib.GxB_JIT_C_COMPILER_NAME, "char*"), + "jit_c_compiler_flags": (lib.GxB_JIT_C_COMPILER_FLAGS, "char*"), + "jit_c_linker_flags": (lib.GxB_JIT_C_LINKER_FLAGS, "char*"), + "jit_c_libraries": (lib.GxB_JIT_C_LIBRARIES, "char*"), + "jit_c_cmake_libs": (lib.GxB_JIT_C_CMAKE_LIBS, "char*"), + "jit_c_preface": (lib.GxB_JIT_C_PREFACE, "char*"), + "jit_error_log": (lib.GxB_JIT_ERROR_LOG, "char*"), + "jit_cache_path": (lib.GxB_JIT_CACHE_PATH, "char*"), # CUDA GPU control - "gpu_control": (lib.GxB_GLOBAL_GPU_CONTROL, "GrB_Desc_Value"), - "gpu_chunk": (lib.GxB_GLOBAL_GPU_CHUNK, "double"), + "gpu_id": (lib.GxB_GLOBAL_GPU_ID, "int"), } # Values to restore defaults _defaults = { @@ -156,6 +174,7 @@ class GlobalConfig(BaseConfig): "chunk": 0, "burble": 0, "print_1based": 0, + "gpu_id": -1, # -1 means no GPU (I think) } _enumerations = { "format": { @@ -163,9 +182,38 @@ class GlobalConfig(BaseConfig): "by_col": lib.GxB_BY_COL, # "no_format": lib.GxB_NO_FORMAT, # Used by iterators; not valid here }, - "gpu_control": { - "always": lib.GxB_GPU_ALWAYS, - "never": lib.GxB_GPU_NEVER, + "jit_c_control": { + "off": lib.GxB_JIT_OFF, + "pause": lib.GxB_JIT_PAUSE, + "run": lib.GxB_JIT_RUN, + "load": lib.GxB_JIT_LOAD, + "on": lib.GxB_JIT_ON, + }, + } + + +class JitConfig(BaseConfig): + _get_function = "GxB_Global_Option_get" + _set_function = "GxB_Global_Option_set" + _options = { + "c_control": (lib.GxB_JIT_C_CONTROL, "int"), + "use_cmake": (lib.GxB_JIT_USE_CMAKE, "bool"), + "c_compiler_name": (lib.GxB_JIT_C_COMPILER_NAME, "char*"), + "c_compiler_flags": (lib.GxB_JIT_C_COMPILER_FLAGS, "char*"), + "c_linker_flags": (lib.GxB_JIT_C_LINKER_FLAGS, "char*"), + "c_libraries": (lib.GxB_JIT_C_LIBRARIES, "char*"), + "c_cmake_libs": (lib.GxB_JIT_C_CMAKE_LIBS, "char*"), + "c_preface": (lib.GxB_JIT_C_PREFACE, "char*"), + "error_log": (lib.GxB_JIT_ERROR_LOG, "char*"), + "cache_path": (lib.GxB_JIT_CACHE_PATH, "char*"), + } + _enumerations = { + "jit_c_control": { + "off": lib.GxB_JIT_OFF, + "pause": lib.GxB_JIT_PAUSE, + "run": lib.GxB_JIT_RUN, + "load": lib.GxB_JIT_LOAD, + "on": lib.GxB_JIT_ON, }, } @@ -254,4 +302,5 @@ def __len__(self): about = About() -config = GlobalConfig() +config = GlobalConfig(context=global_context) +jit = JitConfig() diff --git a/graphblas/tests/test_matrix.py b/graphblas/tests/test_matrix.py index 26017f364..bc942bc49 100644 --- a/graphblas/tests/test_matrix.py +++ b/graphblas/tests/test_matrix.py @@ -4298,7 +4298,7 @@ def test_ss_descriptors(A): A(nthreads=4, axb_method="dot", sort=True) << A @ A assert A.isequal(C2) # Bad option should show list of valid options - with pytest.raises(ValueError, match="nthreads"): + with pytest.raises(ValueError, match="axb_method"): C1(bad_opt=True) << A with pytest.raises(ValueError, match="Duplicate descriptor"): (A @ A).new(nthreads=4, Nthreads=5) diff --git a/pyproject.toml b/pyproject.toml index 9d635c778..c568f591f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ 
-63,7 +63,7 @@ dependencies = [ "pyyaml >=5.4", # These won't be installed by default after 2024.3.0 # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead - "suitesparse-graphblas >=7.4.0.0, <7.5", + "suitesparse-graphblas >=8.0.0.0, <8.1", "numba >=0.55; python_version<'3.12'", # make optional where numba is not supported ] @@ -75,7 +75,7 @@ changelog = "https://github.com/python-graphblas/python-graphblas/releases" [project.optional-dependencies] suitesparse = [ - "suitesparse-graphblas >=7.4.0.0, <7.5", + "suitesparse-graphblas >=8.0.0.0, <8.1", ] networkx = [ "networkx >=2.8", diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index dda7adbaa..c16088b2b 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -7,10 +7,10 @@ conda search 'numpy[channel=conda-forge]>=1.24.3' conda search 'pandas[channel=conda-forge]>=2.0.1' conda search 'scipy[channel=conda-forge]>=1.10.1' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.2.0' +conda search 'awkward[channel=conda-forge]>=2.2.1' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.5.1' -conda search 'numba[channel=conda-forge]>=0.56.4' +conda search 'numba[channel=conda-forge]>=0.57.0' conda search 'pyyaml[channel=conda-forge]>=6.0' conda search 'flake8-bugbear[channel=conda-forge]>=23.5.9' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' From 94f968b52a67c6fbcac0bdfb4a30499dcf00b59c Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 20 May 2023 21:44:39 -0500 Subject: [PATCH 02/40] Let the tests run --- .github/workflows/test_and_build.yml | 2 +- graphblas/tests/test_formatting.py | 33 ++++++++++++++-------------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 537ec16cd..ddc1dd44c 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -85,7 +85,7 @@ jobs: shell: bash -l {0} strategy: # To "stress test" in CI, set `fail-fast` to `false` and perhaps add more items to `matrix.slowtask` - fail-fast: true + fail-fast: false # The build matrix is [os]x[slowtask] and then randomly chooses [pyver] and [sourcetype]. # This should ensure we'll have full code coverage (i.e., no chance of getting unlucky), # since we need to run all slow tests on Windows and non-Windoes OSes. diff --git a/graphblas/tests/test_formatting.py b/graphblas/tests/test_formatting.py index faadc983b..db5ed93c4 100644 --- a/graphblas/tests/test_formatting.py +++ b/graphblas/tests/test_formatting.py @@ -4664,22 +4664,23 @@ def test_large_iso(): v = Vector(int, size=2**60) v[:] = 1 repr_printer(v, "v") - assert repr(v) == ( - '"v_0" nvals size dtype format\n' - "gb.Vector 1152921504606846976 1152921504606846976 INT64 full (iso)\n" - "----------------------------------------------------------------------\n" - "index 0 ... 1152921504606846975\n" - "value 1 ... 1" - ) - repr_printer(v.S, "v.S") - assert repr(v.S) == ( - '"v_0.S" nvals size dtype format\n' - "StructuralMask\n" - "of gb.Vector 1152921504606846976 1152921504606846976 INT64 full (iso)\n" - "---------------------------------------------------------------------------\n" - "index 0 ... 1152921504606846975\n" - "value 1 ... 
1" - ) + if False: # TODO XXX FIXME TEMPORARY + assert repr(v) == ( + '"v_0" nvals size dtype format\n' + "gb.Vector 1152921504606846976 1152921504606846976 INT64 full (iso)\n" + "----------------------------------------------------------------------\n" + "index 0 ... 1152921504606846975\n" + "value 1 ... 1" + ) + repr_printer(v.S, "v.S") + assert repr(v.S) == ( + '"v_0.S" nvals size dtype format\n' + "StructuralMask\n" + "of gb.Vector 1152921504606846976 1152921504606846976 INT64 full (iso)\n" + "---------------------------------------------------------------------------\n" + "index 0 ... 1152921504606846975\n" + "value 1 ... 1" + ) def test_index_expr_vector(v): From f75ee19455a493bf2eabdb28da351f36efb95863 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 20 May 2023 21:58:02 -0500 Subject: [PATCH 03/40] oops --- graphblas/ss/_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index 1559a1c5b..fbc60ea15 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -208,7 +208,7 @@ class JitConfig(BaseConfig): "cache_path": (lib.GxB_JIT_CACHE_PATH, "char*"), } _enumerations = { - "jit_c_control": { + "c_control": { "off": lib.GxB_JIT_OFF, "pause": lib.GxB_JIT_PAUSE, "run": lib.GxB_JIT_RUN, From b57cbff33bf5d94d7fccb9b9883a4e6f1cc25a10 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 21 May 2023 21:51:55 -0500 Subject: [PATCH 04/40] Allow contexts to stack/chain together --- graphblas/core/base.py | 2 +- graphblas/core/expr.py | 2 +- graphblas/core/matrix.py | 4 +- graphblas/core/ss/config.py | 3 +- graphblas/core/ss/context.py | 106 ++++++++++++++++++++------------ graphblas/core/ss/descriptor.py | 2 +- graphblas/core/vector.py | 6 +- graphblas/exceptions.py | 2 + graphblas/ss/_core.py | 4 +- 9 files changed, 80 insertions(+), 51 deletions(-) diff --git a/graphblas/core/base.py b/graphblas/core/base.py index ac94abe10..42a4de9a1 100644 --- a/graphblas/core/base.py +++ b/graphblas/core/base.py @@ -571,7 +571,7 @@ def _new(self, dtype, mask, name, is_cscalar=None, **opts): ): if opts: # Ignore opts for now - desc = descriptor_lookup(**opts) # noqa: F841 + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) if self._is_scalar and self._value._is_cscalar != is_cscalar: return self._value.dup(is_cscalar=is_cscalar, name=name) rv = self._value diff --git a/graphblas/core/expr.py b/graphblas/core/expr.py index 03fb30ef1..d803939a5 100644 --- a/graphblas/core/expr.py +++ b/graphblas/core/expr.py @@ -421,7 +421,7 @@ def _setitem(self, resolved_indexes, obj, *, is_submask): # Fast path using assignElement if self.opts: # Ignore opts for now - desc = descriptor_lookup(**self.opts) # noqa: F841 + desc = descriptor_lookup(**self.opts) # noqa: F841 (keep desc in scope for context) self.parent._assign_element(resolved_indexes, obj) else: mask = self.kwargs.get("mask") diff --git a/graphblas/core/matrix.py b/graphblas/core/matrix.py index 73646f901..4696d8ead 100644 --- a/graphblas/core/matrix.py +++ b/graphblas/core/matrix.py @@ -665,7 +665,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): else: if opts: # Ignore opts for now - desc = descriptor_lookup(**opts) # noqa: F841 + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) new_mat = ffi_new("GrB_Matrix*") rv = Matrix._from_obj(new_mat, self.dtype, self._nrows, self._ncols, name=name) call("GrB_Matrix_dup", [_Pointer(rv), self]) @@ -2707,7 +2707,7 @@ def _extract_element( result = 
Scalar(dtype, is_cscalar=is_cscalar, name=name) if opts: # Ignore opts for now - desc = descriptor_lookup(**opts) # noqa: F841 + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) if is_cscalar: dtype_name = "UDT" if dtype._is_udt else dtype.name if ( diff --git a/graphblas/core/ss/config.py b/graphblas/core/ss/config.py index 52ee33478..433716bb3 100644 --- a/graphblas/core/ss/config.py +++ b/graphblas/core/ss/config.py @@ -182,7 +182,8 @@ def __setitem__(self, key, val): if self._context is None: from .context import Context - self._context = Context._maybe_new() + self._context = Context(engage=False) + self._context._engage() # Disengage when context goes out of scope self._parent._context = self._context # Set context to descriptor info = set_function(self._context._carg, key_obj, val_obj) elif self._parent is None: diff --git a/graphblas/core/ss/context.py b/graphblas/core/ss/context.py index 808fc1fc0..46921a60f 100644 --- a/graphblas/core/ss/context.py +++ b/graphblas/core/ss/context.py @@ -1,21 +1,12 @@ import threading -from ...exceptions import check_status, check_status_carg +from ...exceptions import InvalidValue, check_status, check_status_carg from .. import ffi, lib from .config import BaseConfig ffi_new = ffi.new -class ThreadLocal(threading.local): - """Hold the active context for the current thread.""" - - context = None - - -threadlocal = ThreadLocal() - - class Context(BaseConfig): _context_keys = {"chunk", "gpu_id", "nthreads"} _options = { @@ -26,62 +17,88 @@ class Context(BaseConfig): _defaults = { "nthreads": 0, "chunk": 0, - "gpu_id": -1, # -1 means no GPU (I think) + "gpu_id": -1, # -1 means no GPU } - def __init__(self, engage=True, *, nthreads=None, chunk=None, gpu_id=None): + def __init__(self, engage=True, *, stack=True, nthreads=None, chunk=None, gpu_id=None): super().__init__() - if nthreads is not None: - self["nthreads"] = nthreads - if chunk is not None: - self["chunk"] = chunk - if gpu_id is not None: - self["gpu_id"] = gpu_id - if engage: - self.engage() - - def __new__(cls, engage=True, **opts): - self = object.__new__(cls) self.gb_obj = ffi_new("GxB_Context*") check_status_carg(lib.GxB_Context_new(self.gb_obj), "Context", self.gb_obj[0]) - return self + if stack: + context = threadlocal.context + self["nthreads"] = context["nthreads"] if nthreads is None else nthreads + self["chunk"] = context["chunk"] if chunk is None else chunk + self["gpu_id"] = context["gpu_id"] if gpu_id is None else gpu_id + else: + if nthreads is not None: + self["nthreads"] = nthreads + if chunk is not None: + self["chunk"] = chunk + if gpu_id is not None: + self["gpu_id"] = gpu_id + self._prev_context = None + if engage: + self.engage() @classmethod def _from_obj(cls, gb_obj=None): self = object.__new__(cls) self.gb_obj = gb_obj - self.__init__(engage=False) - return self - - @classmethod - def _maybe_new(cls): - if threadlocal.context is not None: - return threadlocal.context - self = cls(engage=False) - check_status(lib.GxB_Context_engage(self._carg), self) - # Don't assign to threadlocal.context; instead, let it disengage upon going out of scope + self._prev_context = None + super().__init__(self) return self @property def _carg(self): return self.gb_obj[0] + def dup(self, engage=True, *, nthreads=None, chunk=None, gpu_id=None): + if nthreads is None: + nthreads = self["nthreads"] + if chunk is None: + chunk = self["chunk"] + if gpu_id is None: + gpu_id = self["gpu_id"] + return type(self)(engage, stack=False, nthreads=nthreads, chunk=chunk, 
gpu_id=gpu_id) + def __del__(self): gb_obj = getattr(self, "gb_obj", None) if gb_obj is not None and lib is not None: # pragma: no branch (safety) try: - lib.GxB_Context_disengage(gb_obj[0]) - finally: - check_status(lib.GxB_Context_free(gb_obj), self) + self.disengage() + except InvalidValue: + pass + lib.GxB_Context_free(gb_obj) def engage(self): + if self._prev_context is None and (context := threadlocal.context) is not self: + self._prev_context = context check_status(lib.GxB_Context_engage(self._carg), self) threadlocal.context = self + def _engage(self): + """Like engage, but don't set to threadlocal.context. + + This is useful if you want to disengage when the object is deleted by going out of scope. + """ + if self._prev_context is None and (context := threadlocal.context) is not self: + self._prev_context = context + check_status(lib.GxB_Context_engage(self._carg), self) + def disengage(self): + prev_context = self._prev_context + self._prev_context = None if threadlocal.context is self: - threadlocal.context = None - check_status(lib.GxB_Context_disengage(self._carg), self) + if prev_context is not None: + threadlocal.context = prev_context + prev_context.engage() + else: + threadlocal.context = global_context + check_status(lib.GxB_Context_disengage(self._carg), self) + elif prev_context is not None and threadlocal.context is prev_context: + prev_context.engage() + else: + check_status(lib.GxB_Context_disengage(self._carg), self) def __enter__(self): self.engage() @@ -95,7 +112,7 @@ def _context(self): @_context.setter def _context(self, val): - if val is not None: + if val is not None and val is not self: raise AttributeError("'_context' attribute is read-only") @@ -109,3 +126,12 @@ def __del__(self): # pragma: no cover (safety) global_context = GlobalContext._from_obj(lib.GxB_CONTEXT_WORLD) + + +class ThreadLocal(threading.local): + """Hold the active context for the current thread.""" + + context = global_context + + +threadlocal = ThreadLocal() diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py index 66839275c..d09842996 100644 --- a/graphblas/core/ss/descriptor.py +++ b/graphblas/core/ss/descriptor.py @@ -86,7 +86,7 @@ class _DescriptorConfig(BaseConfig): "axb_method": "default", "sort": False, "secure_import": False, - "gpu_id": -1, # -1 means no GPU (I think) + "gpu_id": -1, # -1 means no GPU } def __init__(self): diff --git a/graphblas/core/vector.py b/graphblas/core/vector.py index 102c692bd..cd5b992ba 100644 --- a/graphblas/core/vector.py +++ b/graphblas/core/vector.py @@ -612,7 +612,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): else: if opts: # Ignore opts for now - desc = descriptor_lookup(**opts) # noqa: F841 + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) rv = Vector._from_obj(ffi_new("GrB_Vector*"), self.dtype, self._size, name=name) call("GrB_Vector_dup", [_Pointer(rv), self]) return rv @@ -1757,7 +1757,7 @@ def _extract_element( result = Scalar(dtype, is_cscalar=is_cscalar, name=name) if opts: # Ignore opts for now - desc = descriptor_lookup(**opts) # noqa: F841 + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) if is_cscalar: dtype_name = "UDT" if dtype._is_udt else dtype.name if ( @@ -2179,7 +2179,7 @@ def dup(self, dtype=None, *, clear=False, mask=None, name=None, **opts): dtype = self.dtype if opts: # Ignore opts for now - desc = descriptor_lookup(**opts) # noqa: F841 + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope 
for context) return self.output_type(dtype, *self.shape, name=name) return self.new(dtype, mask=mask, name=name, **opts) diff --git a/graphblas/exceptions.py b/graphblas/exceptions.py index 0acc9ed0b..7dc1c2836 100644 --- a/graphblas/exceptions.py +++ b/graphblas/exceptions.py @@ -148,6 +148,8 @@ def check_status_carg(response_code, type_name, carg): " https://github.com/python-graphblas/python-graphblas/issues\n" "Thanks (and sorry)!" ) + if type_name == "Context": # SuiteSparse does not have GxB_Context_error + text = "" else: string = _ffi.new("char**") error_func(string, carg) diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index fbc60ea15..cb4fbab20 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -56,7 +56,7 @@ def diag(x, k=0, dtype=None, *, name=None, **opts): if typ is Vector: if opts: # Ignore opts for now - desc = descriptor_lookup(**opts) # noqa: F841 + desc = descriptor_lookup(**opts) # noqa: F841 (keep desc in scope for context) size = x._size + abs(k.value) rv = Matrix(dtype, nrows=size, ncols=size, name=name) rv.ss.build_diag(x, k) @@ -174,7 +174,7 @@ class GlobalConfig(BaseConfig): "chunk": 0, "burble": 0, "print_1based": 0, - "gpu_id": -1, # -1 means no GPU (I think) + "gpu_id": -1, # -1 means no GPU } _enumerations = { "format": { From 0ae080ed45d4fb827ebc077bd3520820106d2004 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Fri, 2 Jun 2023 21:52:43 -0500 Subject: [PATCH 05/40] Update to 8.0.1 --- .github/workflows/test_and_build.yml | 4 ++-- docs/env.yml | 2 +- graphblas/exceptions.py | 2 -- graphblas/ss/__init__.py | 2 +- graphblas/ss/_core.py | 28 ----------------------- graphblas/tests/test_formatting.py | 33 ++++++++++++++-------------- pyproject.toml | 4 ++-- 7 files changed, 22 insertions(+), 53 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 1eb20e8ba..5d5be8814 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -204,13 +204,13 @@ jobs: # But, it's still useful for us to test with different versions! psg="" if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=8.0.0.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=8.0.1.0", ""]))') psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", ""]))') elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions - psgver=$(python -c 'import random ; print(random.choice(["==8.0.0.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==8.0.1.0", ""]))') else psgver="" fi diff --git a/docs/env.yml b/docs/env.yml index 631f770f7..2771e338a 100644 --- a/docs/env.yml +++ b/docs/env.yml @@ -8,7 +8,7 @@ dependencies: # python-graphblas dependencies - donfig - numba - - python-suitesparse-graphblas>=8.0.0.0 + - python-suitesparse-graphblas>=8.0.1.0 - pyyaml # extra dependencies - matplotlib diff --git a/graphblas/exceptions.py b/graphblas/exceptions.py index 7dc1c2836..0acc9ed0b 100644 --- a/graphblas/exceptions.py +++ b/graphblas/exceptions.py @@ -148,8 +148,6 @@ def check_status_carg(response_code, type_name, carg): " https://github.com/python-graphblas/python-graphblas/issues\n" "Thanks (and sorry)!" 
) - if type_name == "Context": # SuiteSparse does not have GxB_Context_error - text = "" else: string = _ffi.new("char**") error_func(string, carg) diff --git a/graphblas/ss/__init__.py b/graphblas/ss/__init__.py index aa140248d..25025a134 100644 --- a/graphblas/ss/__init__.py +++ b/graphblas/ss/__init__.py @@ -1,2 +1,2 @@ from ..core.ss.context import Context, global_context -from ._core import about, concat, config, diag, jit +from ._core import about, concat, config, diag diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index cb4fbab20..f05c05f6b 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -151,7 +151,6 @@ class GlobalConfig(BaseConfig): "burble": (lib.GxB_BURBLE, "bool"), "print_1based": (lib.GxB_PRINT_1BASED, "bool"), # JIT control - # TODO: should this be in the global config or a separate JIT config? "jit_c_control": (lib.GxB_JIT_C_CONTROL, "int"), "jit_use_cmake": (lib.GxB_JIT_USE_CMAKE, "bool"), "jit_c_compiler_name": (lib.GxB_JIT_C_COMPILER_NAME, "char*"), @@ -192,32 +191,6 @@ class GlobalConfig(BaseConfig): } -class JitConfig(BaseConfig): - _get_function = "GxB_Global_Option_get" - _set_function = "GxB_Global_Option_set" - _options = { - "c_control": (lib.GxB_JIT_C_CONTROL, "int"), - "use_cmake": (lib.GxB_JIT_USE_CMAKE, "bool"), - "c_compiler_name": (lib.GxB_JIT_C_COMPILER_NAME, "char*"), - "c_compiler_flags": (lib.GxB_JIT_C_COMPILER_FLAGS, "char*"), - "c_linker_flags": (lib.GxB_JIT_C_LINKER_FLAGS, "char*"), - "c_libraries": (lib.GxB_JIT_C_LIBRARIES, "char*"), - "c_cmake_libs": (lib.GxB_JIT_C_CMAKE_LIBS, "char*"), - "c_preface": (lib.GxB_JIT_C_PREFACE, "char*"), - "error_log": (lib.GxB_JIT_ERROR_LOG, "char*"), - "cache_path": (lib.GxB_JIT_CACHE_PATH, "char*"), - } - _enumerations = { - "c_control": { - "off": lib.GxB_JIT_OFF, - "pause": lib.GxB_JIT_PAUSE, - "run": lib.GxB_JIT_RUN, - "load": lib.GxB_JIT_LOAD, - "on": lib.GxB_JIT_ON, - }, - } - - class About(Mapping): _modes = { lib.GrB_NONBLOCKING: "nonblocking", @@ -303,4 +276,3 @@ def __len__(self): about = About() config = GlobalConfig(context=global_context) -jit = JitConfig() diff --git a/graphblas/tests/test_formatting.py b/graphblas/tests/test_formatting.py index db5ed93c4..faadc983b 100644 --- a/graphblas/tests/test_formatting.py +++ b/graphblas/tests/test_formatting.py @@ -4664,23 +4664,22 @@ def test_large_iso(): v = Vector(int, size=2**60) v[:] = 1 repr_printer(v, "v") - if False: # TODO XXX FIXME TEMPORARY - assert repr(v) == ( - '"v_0" nvals size dtype format\n' - "gb.Vector 1152921504606846976 1152921504606846976 INT64 full (iso)\n" - "----------------------------------------------------------------------\n" - "index 0 ... 1152921504606846975\n" - "value 1 ... 1" - ) - repr_printer(v.S, "v.S") - assert repr(v.S) == ( - '"v_0.S" nvals size dtype format\n' - "StructuralMask\n" - "of gb.Vector 1152921504606846976 1152921504606846976 INT64 full (iso)\n" - "---------------------------------------------------------------------------\n" - "index 0 ... 1152921504606846975\n" - "value 1 ... 1" - ) + assert repr(v) == ( + '"v_0" nvals size dtype format\n' + "gb.Vector 1152921504606846976 1152921504606846976 INT64 full (iso)\n" + "----------------------------------------------------------------------\n" + "index 0 ... 1152921504606846975\n" + "value 1 ... 
1" + ) + repr_printer(v.S, "v.S") + assert repr(v.S) == ( + '"v_0.S" nvals size dtype format\n' + "StructuralMask\n" + "of gb.Vector 1152921504606846976 1152921504606846976 INT64 full (iso)\n" + "---------------------------------------------------------------------------\n" + "index 0 ... 1152921504606846975\n" + "value 1 ... 1" + ) def test_index_expr_vector(v): diff --git a/pyproject.toml b/pyproject.toml index c568f591f..7fbdbb8b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,7 +63,7 @@ dependencies = [ "pyyaml >=5.4", # These won't be installed by default after 2024.3.0 # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead - "suitesparse-graphblas >=8.0.0.0, <8.1", + "suitesparse-graphblas >=8.0.1.0, <8.1", "numba >=0.55; python_version<'3.12'", # make optional where numba is not supported ] @@ -75,7 +75,7 @@ changelog = "https://github.com/python-graphblas/python-graphblas/releases" [project.optional-dependencies] suitesparse = [ - "suitesparse-graphblas >=8.0.0.0, <8.1", + "suitesparse-graphblas >=8.0.1.0, <8.1", ] networkx = [ "networkx >=2.8", From a1b757ee5a89f82b060ab07de6e320b133b5e91d Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Fri, 2 Jun 2023 23:10:57 -0500 Subject: [PATCH 06/40] Make `dtypes` a directory so we can add `gb.dtypes.ss` --- graphblas/core/operator/binary.py | 2 +- graphblas/core/operator/indexunary.py | 3 +- graphblas/core/operator/unary.py | 2 +- graphblas/core/ss/matrix.py | 3 +- graphblas/core/ss/vector.py | 3 +- graphblas/dtypes/__init__.py | 23 +++ graphblas/{dtypes.py => dtypes/_core.py} | 192 +++++++++++------------ graphblas/tests/test_dtype.py | 10 +- pyproject.toml | 1 + 9 files changed, 131 insertions(+), 108 deletions(-) create mode 100644 graphblas/dtypes/__init__.py rename graphblas/{dtypes.py => dtypes/_core.py} (71%) diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py index 406405a80..5ea693c73 100644 --- a/graphblas/core/operator/binary.py +++ b/graphblas/core/operator/binary.py @@ -19,10 +19,10 @@ UINT16, UINT32, UINT64, - _sample_values, _supports_complex, lookup_dtype, ) +from ...dtypes._core import _sample_values from ...exceptions import UdfParseError, check_status_carg from .. import _has_numba, _supports_udfs, ffi, lib from ..expr import InfixExprBase diff --git a/graphblas/core/operator/indexunary.py b/graphblas/core/operator/indexunary.py index f6637ae6d..442677147 100644 --- a/graphblas/core/operator/indexunary.py +++ b/graphblas/core/operator/indexunary.py @@ -3,7 +3,8 @@ from types import FunctionType from ... import _STANDARD_OPERATOR_NAMES, indexunary, select -from ...dtypes import BOOL, FP64, INT8, INT64, UINT64, _sample_values, lookup_dtype +from ...dtypes import BOOL, FP64, INT8, INT64, UINT64, lookup_dtype +from ...dtypes._core import _sample_values from ...exceptions import UdfParseError, check_status_carg from .. import _has_numba, ffi, lib from .base import OpBase, ParameterizedUdf, TypedOpBase, _call_op, _deserialize_parameterized diff --git a/graphblas/core/operator/unary.py b/graphblas/core/operator/unary.py index a02445836..4348b3b80 100644 --- a/graphblas/core/operator/unary.py +++ b/graphblas/core/operator/unary.py @@ -15,10 +15,10 @@ UINT16, UINT32, UINT64, - _sample_values, _supports_complex, lookup_dtype, ) +from ...dtypes._core import _sample_values from ...exceptions import UdfParseError, check_status_carg from .. 
import _has_numba, ffi, lib from ..utils import output_type diff --git a/graphblas/core/ss/matrix.py b/graphblas/core/ss/matrix.py index 64aa43a96..990d692b9 100644 --- a/graphblas/core/ss/matrix.py +++ b/graphblas/core/ss/matrix.py @@ -7,7 +7,8 @@ import graphblas as gb from ... import binary, monoid -from ...dtypes import _INDEX, BOOL, INT64, UINT64, _string_to_dtype, lookup_dtype +from ...dtypes import _INDEX, BOOL, INT64, UINT64, lookup_dtype +from ...dtypes._core import _string_to_dtype from ...exceptions import _error_code_lookup, check_status, check_status_carg from .. import NULL, _has_numba, ffi, lib from ..base import call diff --git a/graphblas/core/ss/vector.py b/graphblas/core/ss/vector.py index 1babc556e..ff9e233eb 100644 --- a/graphblas/core/ss/vector.py +++ b/graphblas/core/ss/vector.py @@ -6,7 +6,8 @@ import graphblas as gb from ... import binary, monoid -from ...dtypes import _INDEX, INT64, UINT64, _string_to_dtype, lookup_dtype +from ...dtypes import _INDEX, INT64, UINT64, lookup_dtype +from ...dtypes._core import _string_to_dtype from ...exceptions import _error_code_lookup, check_status, check_status_carg from .. import NULL, ffi, lib from ..base import call diff --git a/graphblas/dtypes/__init__.py b/graphblas/dtypes/__init__.py new file mode 100644 index 000000000..033557ece --- /dev/null +++ b/graphblas/dtypes/__init__.py @@ -0,0 +1,23 @@ +from ._core import ( + _INDEX, + BOOL, + FP32, + FP64, + INT8, + INT16, + INT32, + INT64, + UINT8, + UINT16, + UINT32, + UINT64, + DataType, + _supports_complex, + lookup_dtype, + register_anonymous, + register_new, + unify, +) + +if _supports_complex: + from ._core import FC32, FC64 diff --git a/graphblas/dtypes.py b/graphblas/dtypes/_core.py similarity index 71% rename from graphblas/dtypes.py rename to graphblas/dtypes/_core.py index 920610b95..8918f5aa1 100644 --- a/graphblas/dtypes.py +++ b/graphblas/dtypes/_core.py @@ -1,20 +1,16 @@ -import warnings as _warnings +import warnings -import numpy as _np -from numpy import find_common_type as _find_common_type -from numpy import promote_types as _promote_types +import numpy as np +from numpy import find_common_type, promote_types -from . import backend -from .core import NULL as _NULL -from .core import _has_numba -from .core import ffi as _ffi -from .core import lib as _lib +from .. 
import backend, dtypes +from ..core import NULL, _has_numba, ffi, lib if _has_numba: - import numba as _numba + import numba # Default assumption unless FC32/FC64 are found in lib -_supports_complex = hasattr(_lib, "GrB_FC64") or hasattr(_lib, "GxB_FC64") +_supports_complex = hasattr(lib, "GrB_FC64") or hasattr(lib, "GxB_FC64") class DataType: @@ -26,7 +22,7 @@ def __init__(self, name, gb_obj, gb_name, c_type, numba_type, np_type): self.gb_name = gb_name self.c_type = c_type self.numba_type = numba_type - self.np_type = _np.dtype(np_type) + self.np_type = np.dtype(np_type) def __repr__(self): return self.name @@ -62,7 +58,7 @@ def _carg(self): @property def _is_anonymous(self): - return globals().get(self.name) is not self + return getattr(dtypes, self.name, None) is not self @property def _is_udt(self): @@ -80,27 +76,27 @@ def _deserialize(name, dtype, is_anonymous): def register_new(name, dtype): if not name.isidentifier(): raise ValueError(f"`name` argument must be a valid Python identifier; got: {name!r}") - if name in _registry or name in globals(): + if name in _registry or hasattr(dtypes, name): raise ValueError(f"{name!r} name for dtype is unavailable") rv = register_anonymous(dtype, name) _registry[name] = rv - globals()[name] = rv + setattr(dtypes, name, rv) return rv def register_anonymous(dtype, name=None): try: - dtype = _np.dtype(dtype) + dtype = np.dtype(dtype) except TypeError: if isinstance(dtype, dict): # Allow dtypes such as `{'x': int, 'y': float}` for convenience - dtype = _np.dtype([(key, lookup_dtype(val).np_type) for key, val in dtype.items()]) + dtype = np.dtype([(key, lookup_dtype(val).np_type) for key, val in dtype.items()]) elif isinstance(dtype, str) and "[" in dtype and dtype.endswith("]"): # Allow dtypes such as `"INT64[3, 4]"` for convenience base_dtype, shape = dtype.split("[", 1) base_dtype = lookup_dtype(base_dtype) - shape = _np.lib.format.safe_eval(f"[{shape}") - dtype = _np.dtype((base_dtype.np_type, shape)) + shape = np.lib.format.safe_eval(f"[{shape}") + dtype = np.dtype((base_dtype.np_type, shape)) else: raise if dtype in _registry: @@ -114,36 +110,36 @@ def register_anonymous(dtype, name=None): if dtype.hasobject: raise ValueError("dtype must not allow Python objects") - from .exceptions import check_status_carg + from ..exceptions import check_status_carg - gb_obj = _ffi.new("GrB_Type*") + gb_obj = ffi.new("GrB_Type*") if backend == "suitesparse": # We name this so that we can serialize and deserialize UDTs # We don't yet have C definitions np_repr = _dtype_to_string(dtype).encode() - if len(np_repr) > _lib.GxB_MAX_NAME_LEN: + if len(np_repr) > lib.GxB_MAX_NAME_LEN: msg = ( f"UDT repr is too large to serialize ({len(repr(dtype).encode())} > " - f"{_lib.GxB_MAX_NAME_LEN})." + f"{lib.GxB_MAX_NAME_LEN})." ) if name is not None: - np_repr = name.encode()[: _lib.GxB_MAX_NAME_LEN] + np_repr = name.encode()[: lib.GxB_MAX_NAME_LEN] else: - np_repr = np_repr[: _lib.GxB_MAX_NAME_LEN] - _warnings.warn( + np_repr = np_repr[: lib.GxB_MAX_NAME_LEN] + warnings.warn( f"{msg}. It will use the following name, " f"and the dtype may need to be specified when deserializing: {np_repr}", stacklevel=2, ) - status = _lib.GxB_Type_new(gb_obj, dtype.itemsize, np_repr, _NULL) + status = lib.GxB_Type_new(gb_obj, dtype.itemsize, np_repr, NULL) else: - status = _lib.GrB_Type_new(gb_obj, dtype.itemsize) + status = lib.GrB_Type_new(gb_obj, dtype.itemsize) check_status_carg(status, "Type", gb_obj[0]) # For now, let's use "opaque" unsigned bytes for the c type. 
if name is None: name = _default_name(dtype) - numba_type = _numba.typeof(dtype).dtype if _has_numba else None + numba_type = numba.typeof(dtype).dtype if _has_numba else None rv = DataType(name, gb_obj, None, f"uint8_t[{dtype.itemsize}]", numba_type, dtype) _registry[gb_obj] = rv _registry[dtype] = rv @@ -155,153 +151,153 @@ def register_anonymous(dtype, name=None): BOOL = DataType( "BOOL", - _lib.GrB_BOOL, + lib.GrB_BOOL, "GrB_BOOL", "_Bool", - _numba.types.bool_ if _has_numba else None, - _np.bool_, + numba.types.bool_ if _has_numba else None, + np.bool_, ) INT8 = DataType( - "INT8", _lib.GrB_INT8, "GrB_INT8", "int8_t", _numba.types.int8 if _has_numba else None, _np.int8 + "INT8", lib.GrB_INT8, "GrB_INT8", "int8_t", numba.types.int8 if _has_numba else None, np.int8 ) UINT8 = DataType( "UINT8", - _lib.GrB_UINT8, + lib.GrB_UINT8, "GrB_UINT8", "uint8_t", - _numba.types.uint8 if _has_numba else None, - _np.uint8, + numba.types.uint8 if _has_numba else None, + np.uint8, ) INT16 = DataType( "INT16", - _lib.GrB_INT16, + lib.GrB_INT16, "GrB_INT16", "int16_t", - _numba.types.int16 if _has_numba else None, - _np.int16, + numba.types.int16 if _has_numba else None, + np.int16, ) UINT16 = DataType( "UINT16", - _lib.GrB_UINT16, + lib.GrB_UINT16, "GrB_UINT16", "uint16_t", - _numba.types.uint16 if _has_numba else None, - _np.uint16, + numba.types.uint16 if _has_numba else None, + np.uint16, ) INT32 = DataType( "INT32", - _lib.GrB_INT32, + lib.GrB_INT32, "GrB_INT32", "int32_t", - _numba.types.int32 if _has_numba else None, - _np.int32, + numba.types.int32 if _has_numba else None, + np.int32, ) UINT32 = DataType( "UINT32", - _lib.GrB_UINT32, + lib.GrB_UINT32, "GrB_UINT32", "uint32_t", - _numba.types.uint32 if _has_numba else None, - _np.uint32, + numba.types.uint32 if _has_numba else None, + np.uint32, ) INT64 = DataType( "INT64", - _lib.GrB_INT64, + lib.GrB_INT64, "GrB_INT64", "int64_t", - _numba.types.int64 if _has_numba else None, - _np.int64, + numba.types.int64 if _has_numba else None, + np.int64, ) # _Index (like UINT64) is for internal use only and shouldn't be exposed to the user _INDEX = DataType( "UINT64", - _lib.GrB_UINT64, + lib.GrB_UINT64, "GrB_Index", "GrB_Index", - _numba.types.uint64 if _has_numba else None, - _np.uint64, + numba.types.uint64 if _has_numba else None, + np.uint64, ) UINT64 = DataType( "UINT64", - _lib.GrB_UINT64, + lib.GrB_UINT64, "GrB_UINT64", "uint64_t", - _numba.types.uint64 if _has_numba else None, - _np.uint64, + numba.types.uint64 if _has_numba else None, + np.uint64, ) FP32 = DataType( "FP32", - _lib.GrB_FP32, + lib.GrB_FP32, "GrB_FP32", "float", - _numba.types.float32 if _has_numba else None, - _np.float32, + numba.types.float32 if _has_numba else None, + np.float32, ) FP64 = DataType( "FP64", - _lib.GrB_FP64, + lib.GrB_FP64, "GrB_FP64", "double", - _numba.types.float64 if _has_numba else None, - _np.float64, + numba.types.float64 if _has_numba else None, + np.float64, ) -if _supports_complex and hasattr(_lib, "GxB_FC32"): +if _supports_complex and hasattr(lib, "GxB_FC32"): FC32 = DataType( "FC32", - _lib.GxB_FC32, + lib.GxB_FC32, "GxB_FC32", "float _Complex", - _numba.types.complex64 if _has_numba else None, - _np.complex64, + numba.types.complex64 if _has_numba else None, + np.complex64, ) -if _supports_complex and hasattr(_lib, "GrB_FC32"): # pragma: no cover (unused) +if _supports_complex and hasattr(lib, "GrB_FC32"): # pragma: no cover (unused) FC32 = DataType( "FC32", - _lib.GrB_FC32, + lib.GrB_FC32, "GrB_FC32", "float _Complex", - _numba.types.complex64 if 
_has_numba else None, - _np.complex64, + numba.types.complex64 if _has_numba else None, + np.complex64, ) -if _supports_complex and hasattr(_lib, "GxB_FC64"): +if _supports_complex and hasattr(lib, "GxB_FC64"): FC64 = DataType( "FC64", - _lib.GxB_FC64, + lib.GxB_FC64, "GxB_FC64", "double _Complex", - _numba.types.complex128 if _has_numba else None, - _np.complex128, + numba.types.complex128 if _has_numba else None, + np.complex128, ) -if _supports_complex and hasattr(_lib, "GrB_FC64"): # pragma: no cover (unused) +if _supports_complex and hasattr(lib, "GrB_FC64"): # pragma: no cover (unused) FC64 = DataType( "FC64", - _lib.GrB_FC64, + lib.GrB_FC64, "GrB_FC64", "double _Complex", - _numba.types.complex128 if _has_numba else None, - _np.complex128, + numba.types.complex128 if _has_numba else None, + np.complex128, ) # Used for testing user-defined functions _sample_values = { - INT8: _np.int8(1), - UINT8: _np.uint8(1), - INT16: _np.int16(1), - UINT16: _np.uint16(1), - INT32: _np.int32(1), - UINT32: _np.uint32(1), - INT64: _np.int64(1), - UINT64: _np.uint64(1), - FP32: _np.float32(0.5), - FP64: _np.float64(0.5), - BOOL: _np.bool_(True), + INT8: np.int8(1), + UINT8: np.uint8(1), + INT16: np.int16(1), + UINT16: np.uint16(1), + INT32: np.int32(1), + UINT32: np.uint32(1), + INT64: np.int64(1), + UINT64: np.uint64(1), + FP32: np.float32(0.5), + FP64: np.float64(0.5), + BOOL: np.bool_(True), } if _supports_complex: _sample_values.update( { - FC32: _np.complex64(complex(0, 0.5)), - FC64: _np.complex128(complex(0, 0.5)), + FC32: np.complex64(complex(0, 0.5)), + FC64: np.complex128(complex(0, 0.5)), } ) @@ -393,7 +389,7 @@ def unify(type1, type2, *, is_left_scalar=False, is_right_scalar=False): array_types = [] elif not is_right_scalar: # Using `promote_types` is faster than `find_common_type` - return lookup_dtype(_promote_types(type1.np_type, type2.np_type)) + return lookup_dtype(promote_types(type1.np_type, type2.np_type)) else: scalar_types = [] array_types = [type1.np_type] @@ -401,7 +397,7 @@ def unify(type1, type2, *, is_left_scalar=False, is_right_scalar=False): scalar_types.append(type2.np_type) else: array_types.append(type2.np_type) - return lookup_dtype(_find_common_type(array_types, scalar_types)) + return lookup_dtype(find_common_type(array_types, scalar_types)) def _default_name(dtype): @@ -431,7 +427,7 @@ def _dtype_to_string(dtype): >>> dtype == new_dtype True """ - if isinstance(dtype, _np.dtype) and dtype not in _registry: + if isinstance(dtype, np.dtype) and dtype not in _registry: np_type = dtype else: dtype = lookup_dtype(dtype) @@ -440,11 +436,11 @@ def _dtype_to_string(dtype): np_type = dtype.np_type s = str(np_type) try: - if _np.dtype(_np.lib.format.safe_eval(s)) == np_type: # pragma: no branch (safety) + if np.dtype(np.lib.format.safe_eval(s)) == np_type: # pragma: no branch (safety) return s except Exception: pass - if _np.dtype(np_type.str) != np_type: # pragma: no cover (safety) + if np.dtype(np_type.str) != np_type: # pragma: no cover (safety) raise ValueError(f"Unable to reliably convert dtype to string and back: {dtype}") return repr(np_type.str) @@ -459,5 +455,5 @@ def _string_to_dtype(s): return lookup_dtype(s) except Exception: pass - np_type = _np.dtype(_np.lib.format.safe_eval(s)) + np_type = np.dtype(np.lib.format.safe_eval(s)) return lookup_dtype(np_type) diff --git a/graphblas/tests/test_dtype.py b/graphblas/tests/test_dtype.py index 66c19cce5..233e9b65a 100644 --- a/graphblas/tests/test_dtype.py +++ b/graphblas/tests/test_dtype.py @@ -123,7 +123,7 @@ def 
test_dtype_bad_comparison(): def test_dtypes_match_numpy(): - for key, val in dtypes._registry.items(): + for key, val in dtypes._core._registry.items(): try: if key is int or (isinstance(key, str) and key == "int"): # For win64, numpy treats int as int32, not int64 @@ -137,7 +137,7 @@ def test_dtypes_match_numpy(): def test_pickle(): - for val in dtypes._registry.values(): + for val in dtypes._core._registry.values(): s = pickle.dumps(val) val2 = pickle.loads(s) if val._is_udt: # pragma: no cover @@ -205,7 +205,7 @@ def test_auto_register(): def test_default_names(): - from graphblas.dtypes import _default_name + from graphblas.dtypes._core import _default_name assert _default_name(np.dtype([("x", np.int32), ("y", np.float64)], align=True)) == ( "{'x': INT32, 'y': FP64}" @@ -230,9 +230,9 @@ def test_dtype_to_from_string(): except Exception: pass for dtype in types: - s = dtypes._dtype_to_string(dtype) + s = dtypes._core._dtype_to_string(dtype) try: - dtype2 = dtypes._string_to_dtype(s) + dtype2 = dtypes._core._string_to_dtype(s) except Exception: with pytest.raises(ValueError, match="Unknown dtype"): lookup_dtype(dtype) diff --git a/pyproject.toml b/pyproject.toml index 7fbdbb8b6..1369efbf8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -138,6 +138,7 @@ packages = [ "graphblas.core", "graphblas.core.operator", "graphblas.core.ss", + "graphblas.dtypes", "graphblas.indexunary", "graphblas.io", "graphblas.monoid", From 8ec612f6f68e695111a2ea3729dbeaa54a37cd2d Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Tue, 6 Jun 2023 21:34:13 -0500 Subject: [PATCH 07/40] bump ruff and mark more slow tests --- .pre-commit-config.yaml | 4 ++-- graphblas/tests/test_io.py | 2 ++ graphblas/tests/test_op.py | 1 + pyproject.toml | 1 + 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4d0e5c0b6..d8f70b1e1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.270 + rev: v0.0.271 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.270 + rev: v0.0.271 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index df94c6469..f91d329e6 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -433,6 +433,7 @@ def test_awkward_errors(): @pytest.mark.skipif("not sparse") +@pytest.mark.slow def test_vector_to_from_pydata_sparse(): coords = np.array([0, 1, 2, 3, 4], dtype="int64") data = np.array([10, 20, 30, 40, 50], dtype="int64") @@ -446,6 +447,7 @@ def test_vector_to_from_pydata_sparse(): @pytest.mark.skipif("not sparse") +@pytest.mark.slow def test_matrix_to_from_pydata_sparse(): coords = np.array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], dtype="int64") data = np.array([10, 20, 30, 40, 50], dtype="int64") diff --git a/graphblas/tests/test_op.py b/graphblas/tests/test_op.py index a80012ab7..85c7663f8 100644 --- a/graphblas/tests/test_op.py +++ b/graphblas/tests/test_op.py @@ -1433,6 +1433,7 @@ def test_deprecated(): import graphblas.core.agg # noqa: F401 +@pytest.mark.slow def test_is_idempotent(): assert monoid.min.is_idempotent assert monoid.max[int].is_idempotent diff --git a/pyproject.toml b/pyproject.toml index 1369efbf8..82bf296d1 100644 --- a/pyproject.toml 
+++ b/pyproject.toml @@ -351,6 +351,7 @@ ignore = [ "TCH", # flake8-type-checking (Note: figure out type checking later) "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) "TD", # flake8-todos (Maybe okay to add some of these) + "T", "T001", "T002", "T003", "T004", # flake8-fixme (like flake8-todos) "ERA", # eradicate (We like code in comments!) "PD", # pandas-vet (Intended for scripts that use pandas, not libraries) ] From 39961988f36734a0235109286190c003c1f6f826 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Mon, 12 Jun 2023 16:33:08 -0500 Subject: [PATCH 08/40] moar --- .pre-commit-config.yaml | 8 ++--- graphblas/core/ss/dtypes.py | 61 ++++++++++++++++++++++++++++++++++++ graphblas/core/ss/unary.py | 51 ++++++++++++++++++++++++++++++ graphblas/dtypes/__init__.py | 20 ++++++++++++ graphblas/dtypes/_core.py | 4 ++- graphblas/dtypes/ss.py | 1 + graphblas/unary/ss.py | 1 + pyproject.toml | 2 +- scripts/check_versions.sh | 2 +- 9 files changed, 143 insertions(+), 7 deletions(-) create mode 100644 graphblas/core/ss/dtypes.py create mode 100644 graphblas/core/ss/unary.py create mode 100644 graphblas/dtypes/ss.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d8f70b1e1..24cf9fe35 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -21,7 +21,7 @@ repos: - id: check-added-large-files - id: check-case-conflict - id: check-merge-conflict - - id: check-symlinks + # - id: check-symlinks - id: check-ast - id: check-toml - id: check-yaml @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.271 + rev: v0.0.272 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -79,7 +79,7 @@ repos: additional_dependencies: &flake8_dependencies # These versions need updated manually - flake8==6.0.0 - - flake8-bugbear==23.5.9 + - flake8-bugbear==23.6.5 - flake8-simplify==0.20.0 - repo: https://github.com/asottile/yesqa rev: v1.4.0 @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.271 + rev: v0.0.272 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/graphblas/core/ss/dtypes.py b/graphblas/core/ss/dtypes.py new file mode 100644 index 000000000..c052823df --- /dev/null +++ b/graphblas/core/ss/dtypes.py @@ -0,0 +1,61 @@ +from ... import backend, dtypes +from ...exceptions import check_status_carg +from .. import _has_numba, ffi, lib + +ffi_new = ffi.new +if _has_numba: + from cffi import FFI + from numba.core.typing import cffi_utils + + jit_ffi = FFI() + + +def register_new(name, jit_c_definition): + if backend != "suitesparse": + raise RuntimeError( + "`gb.dtypes.ss.register_new` invalid when not using 'suitesparse' backend" + ) + if not name.isidentifier(): + raise ValueError(f"`name` argument must be a valid Python identifier; got: {name!r}") + if name in dtypes._core._registry or hasattr(dtypes, name): + raise ValueError(f"{name!r} name for dtype is unavailable") + if len(name) > lib.GxB_MAX_NAME_LEN: + raise ValueError( + f"`name` argument is too large. 
Max size is {lib.GxB_MAX_NAME_LEN}; got {len(name)}" + ) + if name not in jit_c_definition: + raise ValueError("`name` argument must be same name as the typedef in `jit_c_definition`") + if "struct" not in jit_c_definition: + raise ValueError("Only struct typedefs are currently allowed for JIT dtypes") + + gb_obj = ffi.new("GrB_Type*") + status = lib.GxB_Type_new( + gb_obj, 0, ffi_new("char[]", name.encode()), ffi_new("char[]", jit_c_definition.encode()) + ) + check_status_carg(status, "Type", gb_obj[0]) + + # Let SuiteSparse:GraphBLAS determine the size (we gave 0 as size above) + size_ptr = ffi_new("size_t*") + check_status_carg(lib.GxB_Type_size(size_ptr, gb_obj[0]), "Type", gb_obj[0]) + size = size_ptr[0] + + if _has_numba: + jit_ffi.cdef(jit_c_definition) + numba_type = cffi_utils.map_type(jit_ffi.typeof(name), use_record_dtype=True) + np_type = numba_type.dtype + if np_type.itemsize != size: # pragma: no cover + # TODO: Should we warn or raise? + numba_type = np_type = None + else: + # Instead of None, should we make these e.g. np.dtype((np.uint8, size))`? + numba_type = np_type = None + + # For now, let's use "opaque" unsigned bytes for the c type. + rv = dtypes._core.DataType(name, gb_obj, None, f"uint8_t[{size}]", numba_type, np_type) + dtypes._core._registry[gb_obj] = rv + if _has_numba: + dtypes._core._registry[np_type] = rv + dtypes._core._registry[numba_type] = rv + dtypes._core._registry[numba_type.name] = rv + setattr(dtypes, name, rv) + return rv diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py new file mode 100644 index 000000000..c6af11029 --- /dev/null +++ b/graphblas/core/ss/unary.py @@ -0,0 +1,51 @@ +from ... import backend +from ...dtypes import lookup_dtype +from ...exceptions import check_status_carg +from .. import NULL, ffi, lib +from ..operator.base import TypedOpBase +from ..operator.unary import TypedBuiltinUnaryOp, UnaryOp + +ffi_new = ffi.new + + +class TypedJitUnaryOp(TypedOpBase): + __slots__ = "_jit_c_definition" + opclass = "UnaryOp" + + def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition): + super().__init__(parent, name, type_, return_type, gb_obj, name) + self._jit_c_definition = jit_c_definition + + @property + def jit_c_definition(self): + return self._jit_c_definition + + __call__ = TypedBuiltinUnaryOp.__call__ + + +def register_new(name, jit_c_definition, input_type, ret_type): + if backend != "suitesparse": + raise RuntimeError( + "`gb.unary.ss.register_new` invalid when not using 'suitesparse' backend" + ) + input_type = lookup_dtype(input_type) + ret_type = lookup_dtype(ret_type) + module, funcname = UnaryOp._remove_nesting(name) + + rv = UnaryOp(name) + gb_obj = ffi_new("GrB_UnaryOp*") + check_status_carg( + lib.GxB_UnaryOp_new( + gb_obj, + NULL, + ret_type._carg, + input_type._carg, + ffi_new("char[]", funcname.encode()), + ffi_new("char[]", jit_c_definition.encode()), + ), + "UnaryOp", + gb_obj, + ) + op = TypedJitUnaryOp(rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition) + rv._add(op) + return rv diff --git a/graphblas/dtypes/__init__.py b/graphblas/dtypes/__init__.py index 033557ece..0d26a44a0 100644 --- a/graphblas/dtypes/__init__.py +++ b/graphblas/dtypes/__init__.py @@ -21,3 +21,23 @@ if _supports_complex: from ._core import FC32, FC64 + + +def __dir__(): + return globals().keys() | {"ss"} + + +def __getattr__(key): + if key == "ss": + from .. 
import backend + + if backend != "suitesparse": + raise AttributeError( + f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"' + ) + from importlib import import_module + + ss = import_module(".ss", __name__) + globals()["ss"] = ss + return ss + raise AttributeError(f"module {__name__!r} has no attribute {key!r}") diff --git a/graphblas/dtypes/_core.py b/graphblas/dtypes/_core.py index 8918f5aa1..33e4be1a8 100644 --- a/graphblas/dtypes/_core.py +++ b/graphblas/dtypes/_core.py @@ -90,7 +90,9 @@ def register_anonymous(dtype, name=None): except TypeError: if isinstance(dtype, dict): # Allow dtypes such as `{'x': int, 'y': float}` for convenience - dtype = np.dtype([(key, lookup_dtype(val).np_type) for key, val in dtype.items()]) + dtype = np.dtype( + [(key, lookup_dtype(val).np_type) for key, val in dtype.items()], align=True + ) elif isinstance(dtype, str) and "[" in dtype and dtype.endswith("]"): # Allow dtypes such as `"INT64[3, 4]"` for convenience base_dtype, shape = dtype.split("[", 1) diff --git a/graphblas/dtypes/ss.py b/graphblas/dtypes/ss.py new file mode 100644 index 000000000..9f6083e01 --- /dev/null +++ b/graphblas/dtypes/ss.py @@ -0,0 +1 @@ +from ..core.ss.dtypes import register_new # noqa: F401 diff --git a/graphblas/unary/ss.py b/graphblas/unary/ss.py index e45cbcda0..300d92271 100644 --- a/graphblas/unary/ss.py +++ b/graphblas/unary/ss.py @@ -1,3 +1,4 @@ from ..core import operator +from ..core.ss.unary import register_new del operator diff --git a/pyproject.toml b/pyproject.toml index 82bf296d1..7c044be4e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -351,7 +351,7 @@ ignore = [ "TCH", # flake8-type-checking (Note: figure out type checking later) "ARG", # flake8-unused-arguments (Sometimes helpful, but too strict) "TD", # flake8-todos (Maybe okay to add some of these) - "T", "T001", "T002", "T003", "T004", # flake8-fixme (like flake8-todos) + "FIX", # flake8-fixme (like flake8-todos) "ERA", # eradicate (We like code in comments!) 
"PD", # pandas-vet (Intended for scripts that use pandas, not libraries) ] diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index af72f9655..552b3b57c 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -12,6 +12,6 @@ conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.6.0' conda search 'numba[channel=conda-forge]>=0.57.0' conda search 'pyyaml[channel=conda-forge]>=6.0' -conda search 'flake8-bugbear[channel=conda-forge]>=23.5.9' +conda search 'flake8-bugbear[channel=conda-forge]>=23.6.5' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' # conda search 'python[channel=conda-forge]>=3.8 *pypy*' From e44c08f99945e85fa8ee270e85bee5abbc6ec466 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 18 Jun 2023 15:26:36 -0500 Subject: [PATCH 09/40] JIT for binary and indexunary operators (untested, undocumented) --- .pre-commit-config.yaml | 6 ++-- graphblas/binary/ss.py | 1 + graphblas/core/ss/binary.py | 61 ++++++++++++++++++++++++++++++++ graphblas/core/ss/unary.py | 4 +-- graphblas/indexunary/__init__.py | 14 +++++++- graphblas/indexunary/ss.py | 4 +++ graphblas/unary/ss.py | 2 +- scripts/check_versions.sh | 2 +- 8 files changed, 86 insertions(+), 8 deletions(-) create mode 100644 graphblas/core/ss/binary.py create mode 100644 graphblas/indexunary/ss.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 24cf9fe35..4eea77211 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.4.0 + rev: v3.6.0 hooks: - id: pyupgrade args: [--py38-plus] @@ -82,12 +82,12 @@ repos: - flake8-bugbear==23.6.5 - flake8-simplify==0.20.0 - repo: https://github.com/asottile/yesqa - rev: v1.4.0 + rev: v1.5.0 hooks: - id: yesqa additional_dependencies: *flake8_dependencies - repo: https://github.com/codespell-project/codespell - rev: v2.2.4 + rev: v2.2.5 hooks: - id: codespell types_or: [python, rst, markdown] diff --git a/graphblas/binary/ss.py b/graphblas/binary/ss.py index e45cbcda0..3228a0137 100644 --- a/graphblas/binary/ss.py +++ b/graphblas/binary/ss.py @@ -1,3 +1,4 @@ from ..core import operator +from ..core.ss.binary import register_new # noqa: F401 del operator diff --git a/graphblas/core/ss/binary.py b/graphblas/core/ss/binary.py new file mode 100644 index 000000000..07fd64f13 --- /dev/null +++ b/graphblas/core/ss/binary.py @@ -0,0 +1,61 @@ +from ... import backend +from ...dtypes import lookup_dtype +from ...exceptions import check_status_carg +from .. 
import NULL, ffi, lib +from ..operator.base import TypedOpBase +from ..operator.binary import BinaryOp, TypedUserBinaryOp + +ffi_new = ffi.new + + +class TypedJitBinaryOp(TypedOpBase): + __slots__ = "_monoid", "_jit_c_definition" + opclass = "BinaryOp" + + def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, dtype2=None): + super().__init__(parent, name, type_, return_type, gb_obj, name, dtype2=dtype2) + self._monoid = None + self._jit_c_definition = jit_c_definition + + @property + def jit_c_definition(self): + return self._jit_c_definition + + monoid = TypedUserBinaryOp.monoid + commutes_to = TypedUserBinaryOp.commutes_to + _semiring_commutes_to = TypedUserBinaryOp._semiring_commutes_to + is_commutative = TypedUserBinaryOp.is_commutative + type2 = TypedUserBinaryOp.type2 + __call__ = TypedUserBinaryOp.__call__ + + +def register_new(name, jit_c_definition, left_type, right_type, ret_type): + if backend != "suitesparse": + raise RuntimeError( + "`gb.binary.ss.register_new` invalid when not using 'suitesparse' backend" + ) + left_type = lookup_dtype(left_type) + right_type = lookup_dtype(right_type) + ret_type = lookup_dtype(ret_type) + module, funcname = BinaryOp._remove_nesting(name) + + rv = BinaryOp(name) + gb_obj = ffi_new("GrB_BinaryOp*") + check_status_carg( + lib.GxB_BinaryOp_new( + gb_obj, + NULL, + ret_type._carg, + left_type._carg, + right_type._carg, + ffi_new("char[]", funcname.encode()), + ffi_new("char[]", jit_c_definition.encode()), + ), + "BinaryOp", + gb_obj, + ) + op = TypedJitBinaryOp( + rv, funcname, left_type, ret_type, gb_obj[0], jit_c_definition, dtype2=right_type + ) + rv._add(op) + return rv diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py index c6af11029..3c9980a2a 100644 --- a/graphblas/core/ss/unary.py +++ b/graphblas/core/ss/unary.py @@ -3,7 +3,7 @@ from ...exceptions import check_status_carg from .. import NULL, ffi, lib from ..operator.base import TypedOpBase -from ..operator.unary import TypedBuiltinUnaryOp, UnaryOp +from ..operator.unary import TypedUserUnaryOp, UnaryOp ffi_new = ffi.new @@ -20,7 +20,7 @@ def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition): def jit_c_definition(self): return self._jit_c_definition - __call__ = TypedBuiltinUnaryOp.__call__ + __call__ = TypedUserUnaryOp.__call__ def register_new(name, jit_c_definition, input_type, ret_type): diff --git a/graphblas/indexunary/__init__.py b/graphblas/indexunary/__init__.py index 472231597..a3cb06608 100644 --- a/graphblas/indexunary/__init__.py +++ b/graphblas/indexunary/__init__.py @@ -4,7 +4,7 @@ def __dir__(): - return globals().keys() | _delayed.keys() + return globals().keys() | _delayed.keys() | {"ss"} def __getattr__(key): @@ -13,6 +13,18 @@ def __getattr__(key): rv = func(**kwargs) globals()[key] = rv return rv + if key == "ss": + from .. 
import backend + + if backend != "suitesparse": + raise AttributeError( + f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"' + ) + from importlib import import_module + + ss = import_module(".ss", __name__) + globals()["ss"] = ss + return ss raise AttributeError(f"module {__name__!r} has no attribute {key!r}") diff --git a/graphblas/indexunary/ss.py b/graphblas/indexunary/ss.py new file mode 100644 index 000000000..370367560 --- /dev/null +++ b/graphblas/indexunary/ss.py @@ -0,0 +1,4 @@ +from ..core import operator +from ..core.ss.indexunary import register_new # noqa: F401 + +del operator diff --git a/graphblas/unary/ss.py b/graphblas/unary/ss.py index 300d92271..5307961bb 100644 --- a/graphblas/unary/ss.py +++ b/graphblas/unary/ss.py @@ -1,4 +1,4 @@ from ..core import operator -from ..core.ss.unary import register_new +from ..core.ss.unary import register_new # noqa: F401 del operator diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 552b3b57c..9a83b2127 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -7,7 +7,7 @@ conda search 'numpy[channel=conda-forge]>=1.24.3' conda search 'pandas[channel=conda-forge]>=2.0.2' conda search 'scipy[channel=conda-forge]>=1.10.1' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.2.1' +conda search 'awkward[channel=conda-forge]>=2.2.2' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.6.0' conda search 'numba[channel=conda-forge]>=0.57.0' From aa627d001a84be9eb69aa938533768ff5d3a8d09 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 18 Jun 2023 15:27:54 -0500 Subject: [PATCH 10/40] oops forgot to add file --- graphblas/core/ss/indexunary.py | 55 +++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 graphblas/core/ss/indexunary.py diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py new file mode 100644 index 000000000..7ae0d2792 --- /dev/null +++ b/graphblas/core/ss/indexunary.py @@ -0,0 +1,55 @@ +from ... import backend +from ...dtypes import lookup_dtype +from ...exceptions import check_status_carg +from .. 
import NULL, ffi, lib +from ..operator.base import TypedOpBase +from ..operator.indexunary import IndexUnaryOp, TypedUserIndexUnaryOp + +ffi_new = ffi.new + + +class TypedJitIndexUnaryOp(TypedOpBase): + __slots__ = "_jit_c_definition" + opclass = "IndexUnaryOp" + + def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, dtype2=None): + super().__init__(parent, name, type_, return_type, gb_obj, name, dtype2=dtype2) + self._jit_c_definition = jit_c_definition + + @property + def jit_c_definition(self): + return self._jit_c_definition + + __call__ = TypedUserIndexUnaryOp.__call__ + + +def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): + if backend != "suitesparse": + raise RuntimeError( + "`gb.indexunary.ss.register_new` invalid when not using 'suitesparse' backend" + ) + input_type = lookup_dtype(input_type) + thunk_type = lookup_dtype(thunk_type) + ret_type = lookup_dtype(ret_type) + module, funcname = IndexUnaryOp._remove_nesting(name) + + rv = IndexUnaryOp(name) + gb_obj = ffi_new("GrB_IndexUnaryOp*") + check_status_carg( + lib.GxB_IndexUnaryOp_new( + gb_obj, + NULL, + ret_type._carg, + input_type._carg, + thunk_type._carg, + ffi_new("char[]", funcname.encode()), + ffi_new("char[]", jit_c_definition.encode()), + ), + "IndexUnaryOp", + gb_obj, + ) + op = TypedJitIndexUnaryOp( + rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type + ) + rv._add(op) + return rv From 620735c6d51d856ccd023b71d76c628f2b98d8a9 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Tue, 20 Jun 2023 21:56:04 -0500 Subject: [PATCH 11/40] Add test for suitesparse jit --- graphblas/binary/ss.py | 2 + graphblas/core/ss/binary.py | 3 + graphblas/core/ss/dtypes.py | 4 +- graphblas/core/ss/indexunary.py | 16 +++- graphblas/core/ss/select.py | 37 +++++++++ graphblas/core/ss/unary.py | 3 + graphblas/indexunary/ss.py | 2 + graphblas/op/ss.py | 2 + graphblas/select/__init__.py | 14 +++- graphblas/select/ss.py | 6 ++ graphblas/semiring/ss.py | 2 + graphblas/tests/test_ssjit.py | 131 ++++++++++++++++++++++++++++++++ graphblas/unary/ss.py | 2 + 13 files changed, 220 insertions(+), 4 deletions(-) create mode 100644 graphblas/core/ss/select.py create mode 100644 graphblas/select/ss.py create mode 100644 graphblas/tests/test_ssjit.py diff --git a/graphblas/binary/ss.py b/graphblas/binary/ss.py index 3228a0137..0c294e322 100644 --- a/graphblas/binary/ss.py +++ b/graphblas/binary/ss.py @@ -1,4 +1,6 @@ from ..core import operator from ..core.ss.binary import register_new # noqa: F401 +_delayed = {} + del operator diff --git a/graphblas/core/ss/binary.py b/graphblas/core/ss/binary.py index 07fd64f13..cb0003088 100644 --- a/graphblas/core/ss/binary.py +++ b/graphblas/core/ss/binary.py @@ -37,6 +37,8 @@ def register_new(name, jit_c_definition, left_type, right_type, ret_type): left_type = lookup_dtype(left_type) right_type = lookup_dtype(right_type) ret_type = lookup_dtype(ret_type) + if not name.startswith("ss."): + name = f"ss.{name}" module, funcname = BinaryOp._remove_nesting(name) rv = BinaryOp(name) @@ -58,4 +60,5 @@ def register_new(name, jit_c_definition, left_type, right_type, ret_type): rv, funcname, left_type, ret_type, gb_obj[0], jit_c_definition, dtype2=right_type ) rv._add(op) + setattr(module, funcname, rv) return rv diff --git a/graphblas/core/ss/dtypes.py b/graphblas/core/ss/dtypes.py index c052823df..259276d8e 100644 --- a/graphblas/core/ss/dtypes.py +++ b/graphblas/core/ss/dtypes.py @@ -17,7 +17,7 @@ def register_new(name, jit_c_definition): ) if 
not name.isidentifier(): raise ValueError(f"`name` argument must be a valid Python identifier; got: {name!r}") - if name in dtypes._core._registry or hasattr(dtypes, name): + if name in dtypes._core._registry or hasattr(dtypes.ss, name): raise ValueError(f"{name!r} name for dtype is unavailable") if len(name) > lib.GxB_MAX_NAME_LEN: raise ValueError( @@ -57,5 +57,5 @@ def register_new(name, jit_c_definition): dtypes._core._registry[np_type] = rv dtypes._core._registry[numba_type] = rv dtypes._core._registry[numba_type.name] = rv - setattr(dtypes, name, rv) + setattr(dtypes.ss, name, rv) return rv diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py index 7ae0d2792..e53a745d6 100644 --- a/graphblas/core/ss/indexunary.py +++ b/graphblas/core/ss/indexunary.py @@ -1,5 +1,5 @@ from ... import backend -from ...dtypes import lookup_dtype +from ...dtypes import BOOL, lookup_dtype from ...exceptions import check_status_carg from .. import NULL, ffi, lib from ..operator.base import TypedOpBase @@ -31,6 +31,8 @@ def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): input_type = lookup_dtype(input_type) thunk_type = lookup_dtype(thunk_type) ret_type = lookup_dtype(ret_type) + if not name.startswith("ss."): + name = f"ss.{name}" module, funcname = IndexUnaryOp._remove_nesting(name) rv = IndexUnaryOp(name) @@ -52,4 +54,16 @@ def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type ) rv._add(op) + if ret_type == BOOL: + from ..operator.select import SelectOp + from .select import TypedJitSelectOp + + select_module, funcname = SelectOp._remove_nesting(name, strict=False) + selectop = SelectOp(name) + op2 = TypedJitSelectOp( + rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type + ) + selectop._add(op2) + setattr(select_module, funcname, selectop) + setattr(module, funcname, rv) return rv diff --git a/graphblas/core/ss/select.py b/graphblas/core/ss/select.py new file mode 100644 index 000000000..ee9dfb004 --- /dev/null +++ b/graphblas/core/ss/select.py @@ -0,0 +1,37 @@ +from ... import backend, indexunary +from ...dtypes import BOOL, lookup_dtype +from .. 
import ffi +from ..operator.base import TypedOpBase +from ..operator.select import SelectOp, TypedUserSelectOp + +ffi_new = ffi.new + + +class TypedJitSelectOp(TypedOpBase): + __slots__ = "_jit_c_definition" + opclass = "SelectOp" + + def __init__(self, parent, name, type_, return_type, gb_obj, jit_c_definition, dtype2=None): + super().__init__(parent, name, type_, return_type, gb_obj, name, dtype2=dtype2) + self._jit_c_definition = jit_c_definition + + @property + def jit_c_definition(self): + return self._jit_c_definition + + __call__ = TypedUserSelectOp.__call__ + + +def register_new(name, jit_c_definition, input_type, thunk_type): + if backend != "suitesparse": + raise RuntimeError( + "`gb.select.ss.register_new` invalid when not using 'suitesparse' backend" + ) + input_type = lookup_dtype(input_type) + thunk_type = lookup_dtype(thunk_type) + if not name.startswith("ss."): + name = f"ss.{name}" + # Register to both `gb.indexunary.ss` and `gb.select.ss.` + indexunary.ss.register_new(name, jit_c_definition, input_type, thunk_type, BOOL) + module, funcname = SelectOp._remove_nesting(name, strict=False) + return getattr(module, funcname) diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py index 3c9980a2a..943177cbd 100644 --- a/graphblas/core/ss/unary.py +++ b/graphblas/core/ss/unary.py @@ -30,6 +30,8 @@ def register_new(name, jit_c_definition, input_type, ret_type): ) input_type = lookup_dtype(input_type) ret_type = lookup_dtype(ret_type) + if not name.startswith("ss."): + name = f"ss.{name}" module, funcname = UnaryOp._remove_nesting(name) rv = UnaryOp(name) @@ -48,4 +50,5 @@ def register_new(name, jit_c_definition, input_type, ret_type): ) op = TypedJitUnaryOp(rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition) rv._add(op) + setattr(module, funcname, rv) return rv diff --git a/graphblas/indexunary/ss.py b/graphblas/indexunary/ss.py index 370367560..58218df6f 100644 --- a/graphblas/indexunary/ss.py +++ b/graphblas/indexunary/ss.py @@ -1,4 +1,6 @@ from ..core import operator from ..core.ss.indexunary import register_new # noqa: F401 +_delayed = {} + del operator diff --git a/graphblas/op/ss.py b/graphblas/op/ss.py index e45cbcda0..97852fc12 100644 --- a/graphblas/op/ss.py +++ b/graphblas/op/ss.py @@ -1,3 +1,5 @@ from ..core import operator +_delayed = {} + del operator diff --git a/graphblas/select/__init__.py b/graphblas/select/__init__.py index 72aa8d226..aaf8e12d0 100644 --- a/graphblas/select/__init__.py +++ b/graphblas/select/__init__.py @@ -8,7 +8,7 @@ def __dir__(): - return globals().keys() | _delayed.keys() + return globals().keys() | _delayed.keys() | {"ss"} def __getattr__(key): @@ -17,6 +17,18 @@ def __getattr__(key): rv = func(**kwargs) globals()[key] = rv return rv + if key == "ss": + from .. 
import backend + + if backend != "suitesparse": + raise AttributeError( + f'module {__name__!r} only has attribute "ss" when backend is "suitesparse"' + ) + from importlib import import_module + + ss = import_module(".ss", __name__) + globals()["ss"] = ss + return ss raise AttributeError(f"module {__name__!r} has no attribute {key!r}") diff --git a/graphblas/select/ss.py b/graphblas/select/ss.py new file mode 100644 index 000000000..173067382 --- /dev/null +++ b/graphblas/select/ss.py @@ -0,0 +1,6 @@ +from ..core import operator +from ..core.ss.select import register_new # noqa: F401 + +_delayed = {} + +del operator diff --git a/graphblas/semiring/ss.py b/graphblas/semiring/ss.py index e45cbcda0..97852fc12 100644 --- a/graphblas/semiring/ss.py +++ b/graphblas/semiring/ss.py @@ -1,3 +1,5 @@ from ..core import operator +_delayed = {} + del operator diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py new file mode 100644 index 000000000..455e32dc3 --- /dev/null +++ b/graphblas/tests/test_ssjit.py @@ -0,0 +1,131 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_equal + +from graphblas import backend, binary, dtypes, indexunary, select, unary +from graphblas.core import _supports_udfs as supports_udfs + +from .conftest import autocompute + +from graphblas import Vector # isort:skip (for dask-graphblas) + +if backend != "suitesparse": + pytest.skip("not suitesparse", allow_module_level=True) + + +@pytest.fixture +def v(): + return Vector.from_coo([1, 3, 4, 6], [1, 1, 2, 0]) + + +@autocompute +def test_jit_udt(): + dtype = dtypes.ss.register_new( + "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;" + ) + assert not hasattr(dtypes, "myquaternion") + assert dtypes.ss.myquaternion is dtype + assert dtype.name == "myquaternion" + assert str(dtype) == "myquaternion" + assert dtype.gb_name is None + assert dtype.np_type == np.dtype([("x", " Date: Wed, 21 Jun 2023 13:06:43 -0500 Subject: [PATCH 12/40] Bump pre-commit --- .pre-commit-config.yaml | 6 +++--- pyproject.toml | 2 ++ 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4eea77211..a3a7597ed 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.6.0 + rev: v3.7.0 hooks: - id: pyupgrade args: [--py38-plus] @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.272 + rev: v0.0.274 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.272 + rev: v0.0.274 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/pyproject.toml b/pyproject.toml index 7c044be4e..531f319b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -306,6 +306,7 @@ ignore = [ # "SIM401", # Use dict.get ... 
instead of if-else-block (Note: if-else better for coverage and sometimes clearer) "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance) "TRY200", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception) + "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` (Note: no annotations yet) # Intentionally ignored "COM812", # Trailing comma missing @@ -317,6 +318,7 @@ ignore = [ "N806", # Variable ... in function should be lowercase "N807", # Function name should not start and end with `__` "N818", # Exception name ... should be named with an Error suffix (Note: good advice) + "PLR0124", # Name compared with itself, consider replacing `x == x` (Note: too strict) "PLR0911", # Too many return statements "PLR0912", # Too many branches "PLR0913", # Too many arguments to function call From 0e6a217cb45ccf13f1c09fdbff8fc36d3572d67c Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Fri, 23 Jun 2023 13:02:07 -0500 Subject: [PATCH 13/40] bump ruff --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a3a7597ed..d89c69f8e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.274 + rev: v0.0.275 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.274 + rev: v0.0.275 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint From b4234e921864f154d6bc863920baff5fecd2e41b Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Mon, 26 Jun 2023 22:43:45 -0500 Subject: [PATCH 14/40] Handle scipy 1.11.0 --- .github/workflows/test_and_build.yml | 10 +++++----- .pre-commit-config.yaml | 2 +- graphblas/io/_matrixmarket.py | 3 +-- graphblas/tests/test_io.py | 16 ++++++++++++---- scripts/check_versions.sh | 8 ++++---- 5 files changed, 23 insertions(+), 16 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 358f94a64..4da73a67a 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -170,7 +170,7 @@ jobs: nxver=$(python -c 'import random ; print(random.choice(["=2.7", "=2.8", "=3.0", "=3.1", ""]))') yamlver=$(python -c 'import random ; print(random.choice(["=5.4", "=6.0", ""]))') sparsever=$(python -c 'import random ; print(random.choice(["=0.13", "=0.14", ""]))') - fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", ""]))') + fmmver=$(python -c 'import random ; print(random.choice(["=1.4", "=1.5", "=1.6", "=1.7", ""]))') if [[ ${{ startsWith(steps.pyver.outputs.selected, '3.8') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') @@ -178,17 +178,17 @@ jobs: akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') + 
spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') else # Python 3.11 npver=$(python -c 'import random ; print(random.choice(["=1.23", "=1.24", "=1.25", ""]))') - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", ""]))') akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", ""]))') fi @@ -217,7 +217,7 @@ jobs: if [[ ${npver} == "=1.25" ]] ; then numbaver="" if [[ ${spver} == "=1.8" ]] ; then - spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", ""]))') + spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') fi elif [[ ${npver} == "=1.24" || ${{ startsWith(steps.pyver.outputs.selected, '3.11') }} == true ]] ; then numbaver=$(python -c 'import random ; print(random.choice(["=0.57", ""]))') diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d89c69f8e..f0ca307e8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,7 +39,7 @@ repos: name: Validate pyproject.toml # I don't yet trust ruff to do what autoflake does - repo: https://github.com/PyCQA/autoflake - rev: v2.1.1 + rev: v2.2.0 hooks: - id: autoflake args: [--in-place] diff --git a/graphblas/io/_matrixmarket.py b/graphblas/io/_matrixmarket.py index 294bcfa1e..0946580c8 100644 --- a/graphblas/io/_matrixmarket.py +++ b/graphblas/io/_matrixmarket.py @@ -36,7 +36,6 @@ def mmread(source, engine="auto", *, dup_op=None, name=None, **kwargs): try: # scipy is currently needed for *all* engines from scipy.io import mmread - from scipy.sparse import isspmatrix_coo except ImportError: # pragma: no cover (import) raise ImportError("scipy is required to read Matrix Market files") from None engine = engine.lower() @@ -54,7 +53,7 @@ def mmread(source, engine="auto", *, dup_op=None, name=None, **kwargs): f'Bad engine value: {engine!r}. Must be "auto", "scipy", "fmm", or "fast_matrix_market"' ) array = mmread(source, **kwargs) - if isspmatrix_coo(array): + if array.format == "coo": nrows, ncols = array.shape return Matrix.from_coo( array.row, array.col, array.data, nrows=nrows, ncols=ncols, dup_op=dup_op, name=name diff --git a/graphblas/tests/test_io.py b/graphblas/tests/test_io.py index b2b102374..bf2ca2015 100644 --- a/graphblas/tests/test_io.py +++ b/graphblas/tests/test_io.py @@ -59,18 +59,24 @@ def test_vector_to_from_numpy(): csr = gb.io.to_scipy_sparse(v, "csr") assert csr.nnz == 2 - assert ss.isspmatrix_csr(csr) + # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. 
`ss.isspmatrix_csr` + assert isinstance(csr, getattr(ss, "sparray", ss.spmatrix)) + assert csr.format == "csr" np.testing.assert_array_equal(csr.toarray(), np.array([[0.0, 2.0, 4.1]])) csc = gb.io.to_scipy_sparse(v, "csc") assert csc.nnz == 2 - assert ss.isspmatrix_csc(csc) + # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_csc` + assert isinstance(csc, getattr(ss, "sparray", ss.spmatrix)) + assert csc.format == "csc" np.testing.assert_array_equal(csc.toarray(), np.array([[0.0, 2.0, 4.1]]).T) # default to csr-like coo = gb.io.to_scipy_sparse(v, "coo") assert coo.shape == csr.shape - assert ss.isspmatrix_coo(coo) + # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_coo` + assert isinstance(coo, getattr(ss, "sparray", ss.spmatrix)) + assert coo.format == "coo" assert coo.nnz == 2 np.testing.assert_array_equal(coo.toarray(), np.array([[0.0, 2.0, 4.1]])) @@ -99,7 +105,9 @@ def test_matrix_to_from_numpy(): for format in ["csr", "csc", "coo"]: sparse = gb.io.to_scipy_sparse(M, format) - assert getattr(ss, f"isspmatrix_{format}")(sparse) + # 2023-06-25: scipy 1.11.0 added `sparray` and changed e.g. `ss.isspmatrix_csr` + assert isinstance(sparse, getattr(ss, "sparray", ss.spmatrix)) + assert sparse.format == format assert sparse.nnz == 3 np.testing.assert_array_equal(sparse.toarray(), a) M2 = gb.io.from_scipy_sparse(sparse) diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 4bfb553b8..22f0b3cca 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -5,12 +5,12 @@ # Tip: add `--json` for more information. conda search 'numpy[channel=conda-forge]>=1.25.0' conda search 'pandas[channel=conda-forge]>=2.0.2' -conda search 'scipy[channel=conda-forge]>=1.10.1' +conda search 'scipy[channel=conda-forge]>=1.11.0' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.2.2' +conda search 'awkward[channel=conda-forge]>=2.2.4' conda search 'sparse[channel=conda-forge]>=0.14.0' -conda search 'fast_matrix_market[channel=conda-forge]>=1.6.0' -conda search 'numba[channel=conda-forge]>=0.57.0' +conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' +conda search 'numba[channel=conda-forge]>=0.57.1' conda search 'pyyaml[channel=conda-forge]>=6.0' conda search 'flake8-bugbear[channel=conda-forge]>=23.6.5' conda search 'flake8-simplify[channel=conda-forge]>=0.20.0' From 3416f3121a7ab108d3ac72629a80dcfcc68bde42 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Mon, 26 Jun 2023 23:26:19 -0500 Subject: [PATCH 15/40] oops fix usage of check_status_carg --- graphblas/core/operator/binary.py | 4 ++-- graphblas/core/operator/indexunary.py | 4 ++-- graphblas/core/operator/semiring.py | 4 ++-- graphblas/core/operator/unary.py | 4 ++-- graphblas/core/ss/binary.py | 2 +- graphblas/core/ss/indexunary.py | 2 +- graphblas/core/ss/unary.py | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/graphblas/core/operator/binary.py b/graphblas/core/operator/binary.py index 5ea693c73..434ad91cb 100644 --- a/graphblas/core/operator/binary.py +++ b/graphblas/core/operator/binary.py @@ -506,7 +506,7 @@ def binary_wrapper(z, x, y): # pragma: no cover (numba) type_.gb_obj, ), "BinaryOp", - new_binary, + new_binary[0], ) op = TypedUserBinaryOp(new_type_obj, name, type_, ret_type, new_binary[0]) new_type_obj._add(op) @@ -611,7 +611,7 @@ def binary_wrapper(z_ptr, x_ptr, y_ptr): # pragma: no cover (numba) new_binary, binary_wrapper.cffi, ret_type._carg, dtype._carg, dtype2._carg ), "BinaryOp", - 
new_binary, + new_binary[0], ) op = TypedUserBinaryOp( self, diff --git a/graphblas/core/operator/indexunary.py b/graphblas/core/operator/indexunary.py index 442677147..8b1211258 100644 --- a/graphblas/core/operator/indexunary.py +++ b/graphblas/core/operator/indexunary.py @@ -194,7 +194,7 @@ def indexunary_wrapper(z, x, row, col, y): # pragma: no cover (numba) type_.gb_obj, ), "IndexUnaryOp", - new_indexunary, + new_indexunary[0], ) op = cls._typed_user_class(new_type_obj, name, type_, ret_type, new_indexunary[0]) new_type_obj._add(op) @@ -226,7 +226,7 @@ def _compile_udt(self, dtype, dtype2): new_indexunary, indexunary_wrapper.cffi, ret_type._carg, dtype._carg, dtype2._carg ), "IndexUnaryOp", - new_indexunary, + new_indexunary[0], ) op = TypedUserIndexUnaryOp( self, diff --git a/graphblas/core/operator/semiring.py b/graphblas/core/operator/semiring.py index 035a1c43b..d367461f6 100644 --- a/graphblas/core/operator/semiring.py +++ b/graphblas/core/operator/semiring.py @@ -228,7 +228,7 @@ def _build(cls, name, monoid, binaryop, *, anonymous=False): check_status_carg( lib.GrB_Semiring_new(new_semiring, monoid[binary_out].gb_obj, binary_func.gb_obj), "Semiring", - new_semiring, + new_semiring[0], ) ret_type = monoid[binary_out].return_type op = TypedUserSemiring( @@ -254,7 +254,7 @@ def _compile_udt(self, dtype, dtype2): ret_type = monoid.return_type new_semiring = ffi_new("GrB_Semiring*") status = lib.GrB_Semiring_new(new_semiring, monoid.gb_obj, binaryop.gb_obj) - check_status_carg(status, "Semiring", new_semiring) + check_status_carg(status, "Semiring", new_semiring[0]) op = TypedUserSemiring( new_semiring, self.name, diff --git a/graphblas/core/operator/unary.py b/graphblas/core/operator/unary.py index 4348b3b80..11ada4e48 100644 --- a/graphblas/core/operator/unary.py +++ b/graphblas/core/operator/unary.py @@ -239,7 +239,7 @@ def unary_wrapper(z, x): new_unary, unary_wrapper.cffi, ret_type.gb_obj, type_.gb_obj ), "UnaryOp", - new_unary, + new_unary[0], ) op = TypedUserUnaryOp(new_type_obj, name, type_, ret_type, new_unary[0]) new_type_obj._add(op) @@ -264,7 +264,7 @@ def _compile_udt(self, dtype, dtype2): check_status_carg( lib.GrB_UnaryOp_new(new_unary, unary_wrapper.cffi, ret_type._carg, dtype._carg), "UnaryOp", - new_unary, + new_unary[0], ) op = TypedUserUnaryOp(self, self.name, dtype, ret_type, new_unary[0]) self._udt_types[dtype] = ret_type diff --git a/graphblas/core/ss/binary.py b/graphblas/core/ss/binary.py index cb0003088..3d8983244 100644 --- a/graphblas/core/ss/binary.py +++ b/graphblas/core/ss/binary.py @@ -54,7 +54,7 @@ def register_new(name, jit_c_definition, left_type, right_type, ret_type): ffi_new("char[]", jit_c_definition.encode()), ), "BinaryOp", - gb_obj, + gb_obj[0], ) op = TypedJitBinaryOp( rv, funcname, left_type, ret_type, gb_obj[0], jit_c_definition, dtype2=right_type diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py index e53a745d6..ceab64e16 100644 --- a/graphblas/core/ss/indexunary.py +++ b/graphblas/core/ss/indexunary.py @@ -48,7 +48,7 @@ def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): ffi_new("char[]", jit_c_definition.encode()), ), "IndexUnaryOp", - gb_obj, + gb_obj[0], ) op = TypedJitIndexUnaryOp( rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition, dtype2=thunk_type diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py index 943177cbd..72826ff43 100644 --- a/graphblas/core/ss/unary.py +++ b/graphblas/core/ss/unary.py @@ -46,7 +46,7 @@ def register_new(name, 
jit_c_definition, input_type, ret_type):
             ffi_new("char[]", jit_c_definition.encode()),
         ),
         "UnaryOp",
-        gb_obj,
+        gb_obj[0],
     )
     op = TypedJitUnaryOp(rv, funcname, input_type, ret_type, gb_obj[0], jit_c_definition)
     rv._add(op)

From 36be76699d257b3e01bca04e494cd45d94e8a784 Mon Sep 17 00:00:00 2001
From: Erik Welch
Date: Mon, 26 Jun 2023 23:59:04 -0500
Subject: [PATCH 16/40] Add ways to easily use burble in tests

---
 .github/workflows/imports.yml |  4 +++-
 graphblas/tests/conftest.py   | 19 +++++++++++++++++++
 graphblas/tests/test_ssjit.py | 33 +++++++++++++++++++--------------
 3 files changed, 41 insertions(+), 15 deletions(-)

diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml
index 2b0b0ed9f..82ca21a56 100644
--- a/.github/workflows/imports.yml
+++ b/.github/workflows/imports.yml
@@ -54,5 +54,7 @@ jobs:
           python-version: ${{ needs.rngs.outputs.pyver }}
           # python-version: ${{ matrix.python-version }}
       - run: python -m pip install --upgrade pip
+      - run: pip install --pre suitesparse-graphblas  # Use if we need pre-release
       - run: pip install -e .[default]
-      - run: ./scripts/test_imports.sh
+      - name: Run test imports
+        run: ./scripts/test_imports.sh
diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py
index 0d1f4008a..0137ea59e 100644
--- a/graphblas/tests/conftest.py
+++ b/graphblas/tests/conftest.py
@@ -1,4 +1,5 @@
 import atexit
+import contextlib
 import functools
 import itertools
 import platform
@@ -114,6 +115,24 @@ def ic():  # pragma: no cover (debug)
     return icecream.ic
 
 
+@contextlib.contextmanager
+def burble():
+    """Show the burble diagnostics within a context."""
+    # Don't keep track of previous state; always set to False when done
+    gb.ss.config["burble"] = True
+    try:
+        yield
+    finally:
+        gb.ss.config["burble"] = False
+
+
+@pytest.fixture(scope="session", autouse=True)
+def burble_all():  # pragma: no cover (debug)
+    """Show the burble diagnostics for the entire test."""
+    with burble():
+        yield burble
+
+
 def autocompute(func):
     @functools.wraps(func)
     def inner(*args, **kwargs):
diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py
index 455e32dc3..212edecde 100644
--- a/graphblas/tests/test_ssjit.py
+++ b/graphblas/tests/test_ssjit.py
@@ -5,7 +5,7 @@
 from graphblas import backend, binary, dtypes, indexunary, select, unary
 from graphblas.core import _supports_udfs as supports_udfs
 
-from .conftest import autocompute
+from .conftest import autocompute, burble
 
 from graphblas import Vector  # isort:skip (for dask-graphblas)
 
@@ -20,9 +20,10 @@ def v():
 
 @autocompute
 def test_jit_udt():
-    dtype = dtypes.ss.register_new(
-        "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;"
-    )
+    with burble():
+        dtype = dtypes.ss.register_new(
+            "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;"
+        )
     assert not hasattr(dtypes, "myquaternion")
     assert dtypes.ss.myquaternion is dtype
     assert dtype.name == "myquaternion"
@@ -41,7 +42,8 @@ def test_jit_udt():
 
 def test_jit_unary(v):
     cdef = "void square (float *z, float *x) { (*z) = (*x) * (*x) ; } ;"
-    square = unary.ss.register_new("square", cdef, "FP32", "FP32")
+    with burble():
+        square = unary.ss.register_new("square", cdef, "FP32", "FP32")
     assert not hasattr(unary, "square")
     assert unary.ss.square is square
     assert square.name == "ss.square"
@@ -58,13 +60,14 @@ def test_jit_unary(v):
 
 def test_jit_binary(v):
     cdef = "void absdiff (double *z, double *x, double *y) { (*z) = fabs ((*x) - (*y)) ; }"
-    absdiff = binary.ss.register_new(
-        "absdiff",
-        cdef,
"FP64", - "FP64", - "FP64", - ) + with burble(): + absdiff = binary.ss.register_new( + "absdiff", + cdef, + "FP64", + "FP64", + "FP64", + ) assert not hasattr(binary, "absdiff") assert binary.ss.absdiff is absdiff assert absdiff.name == "ss.absdiff" @@ -87,7 +90,8 @@ def test_jit_indexunary(v): "void diffy (double *z, double *x, GrB_Index i, GrB_Index j, double *y) " "{ (*z) = (i + j) * fabs ((*x) - (*y)) ; }" ) - diffy = indexunary.ss.register_new("diffy", cdef, "FP64", "FP64", "FP64") + with burble(): + diffy = indexunary.ss.register_new("diffy", cdef, "FP64", "FP64", "FP64") assert not hasattr(indexunary, "diffy") assert indexunary.ss.diffy is diffy assert not hasattr(select, "diffy") @@ -110,7 +114,8 @@ def test_jit_select(v): "void woot (bool *z, const int32_t *x, GrB_Index i, GrB_Index j, int32_t *y) " "{ (*z) = ((*x) + i + j == (*y)) ; }" ) - woot = select.ss.register_new("woot", cdef, "INT32", "INT32") + with burble(): + woot = select.ss.register_new("woot", cdef, "INT32", "INT32") assert not hasattr(select, "woot") assert select.ss.woot is woot assert not hasattr(indexunary, "woot") From 20bde6345f715f057014c06fcccbf1186e022d24 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Tue, 27 Jun 2023 00:05:43 -0500 Subject: [PATCH 17/40] Try this to allow pre-release --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2a411f53a..7739a06ed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,7 +63,7 @@ dependencies = [ "pyyaml >=5.4", # These won't be installed by default after 2024.3.0 # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead - "suitesparse-graphblas >=8.0.1.0, <8.1", + "suitesparse-graphblas >8.0.0, <8.1", "numba >=0.55; python_version<'3.12'", # make optional where numba is not supported ] From 70a7b98c7bd130fec7fbb749d70cbaa1506671ff Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Tue, 27 Jun 2023 00:09:13 -0500 Subject: [PATCH 18/40] oops and here --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 7739a06ed..759489603 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,7 +75,7 @@ changelog = "https://github.com/python-graphblas/python-graphblas/releases" [project.optional-dependencies] suitesparse = [ - "suitesparse-graphblas >=8.0.1.0, <8.1", + "suitesparse-graphblas >8.0.0, <8.1", ] networkx = [ "networkx >=2.8", From e259d22d5dcd7fef43ebfdaa56dbf8c9c60092ce Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 28 Jun 2023 12:05:42 -0500 Subject: [PATCH 19/40] Fix bad merge --- .github/workflows/test_and_build.yml | 2 +- graphblas/binary/ss.py | 2 -- graphblas/unary/ss.py | 2 -- pyproject.toml | 4 ++-- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 19a9449e4..5920bb765 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -85,7 +85,7 @@ jobs: shell: bash -l {0} strategy: # To "stress test" in CI, set `fail-fast` to `false` and perhaps add more items to `matrix.slowtask` - fail-fast: true + fail-fast: false # XXX TODO FIXME temporary # The build matrix is [os]x[slowtask] and then randomly chooses [pyver] and [sourcetype]. # This should ensure we'll have full code coverage (i.e., no chance of getting unlucky), # since we need to run all slow tests on Windows and non-Windoes OSes. 
diff --git a/graphblas/binary/ss.py b/graphblas/binary/ss.py index a8fd18fa5..0c294e322 100644 --- a/graphblas/binary/ss.py +++ b/graphblas/binary/ss.py @@ -3,6 +3,4 @@ _delayed = {} -_delayed = {} - del operator diff --git a/graphblas/unary/ss.py b/graphblas/unary/ss.py index c4ed01bad..e97784612 100644 --- a/graphblas/unary/ss.py +++ b/graphblas/unary/ss.py @@ -3,6 +3,4 @@ _delayed = {} -_delayed = {} - del operator diff --git a/pyproject.toml b/pyproject.toml index 759489603..e32bd4017 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,7 +63,7 @@ dependencies = [ "pyyaml >=5.4", # These won't be installed by default after 2024.3.0 # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead - "suitesparse-graphblas >8.0.0, <8.1", + "suitesparse-graphblas >=7.4.0.0, <8.1", "numba >=0.55; python_version<'3.12'", # make optional where numba is not supported ] @@ -75,7 +75,7 @@ changelog = "https://github.com/python-graphblas/python-graphblas/releases" [project.optional-dependencies] suitesparse = [ - "suitesparse-graphblas >8.0.0, <8.1", + "suitesparse-graphblas >=7.4.0.0, <8.1", ] networkx = [ "networkx >=2.8", From 05d3db860c1e95b9277d8b14c3c390c649fb2adb Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 28 Jun 2023 12:35:15 -0500 Subject: [PATCH 20/40] Try to fix SS JIT config when testing on Linux --- .github/workflows/test_and_build.yml | 2 +- graphblas/tests/test_ssjit.py | 26 +++++++++++++++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 5920bb765..3b0c88a60 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -320,7 +320,7 @@ jobs: if [[ $H && $normal ]] ; then if [[ $macos ]] ; then echo " $vanilla" ; elif [[ $windows ]] ; then echo " $suitesparse" ; fi ; fi)$( \ if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi) echo ${args} - (cd .. && pytest -v --pyargs suitesparse_graphblas) # Don't use our conftest.py + (cd .. 
&& pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config && pytest -v --pyargs suitesparse_graphblas) # Don't use our conftest.py set -x # echo on coverage run -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_normal' && '--runslow' || '' }} diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 212edecde..471680088 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -1,7 +1,10 @@ +import os + import numpy as np import pytest from numpy.testing import assert_array_equal +import graphblas as gb from graphblas import backend, binary, dtypes, indexunary, select, unary from graphblas.core import _supports_udfs as supports_udfs @@ -10,7 +13,26 @@ from graphblas import Vector # isort:skip (for dask-graphblas) if backend != "suitesparse": - pytest.skip("not suitesparse", allow_module_level=True) + pytest.skip("not suitesparse backend", allow_module_level=True) +if gb.ss.about["library_version"][0] < 8: + pytest.skip("not SuiteSparse:GraphBLAS >=8", allow_module_level=True) + + +@pytest.fixture(scope="module", autouse=True) +def _setup_jit(): + if "CONDA_PREFIX" not in os.environ: + return + conda_prefix = os.environ["CONDA_PREFIX"] + gb.ss.config["jit_c_cmake_libs"] = ( + f"m;dl;{conda_prefix}/x86_64-conda-linux-gnu/lib/libgomp.so;" + f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" + ) + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" + gb.ss.config["jit_c_libraries"] = ( + f" -lm -ldl {conda_prefix}/x86_64-conda-linux-gnu/lib/libgomp.so " + f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" + ) + gb.ss.config["jit_c_control"] = "on" @pytest.fixture @@ -20,6 +42,7 @@ def v(): @autocompute def test_jit_udt(): + print(gb.ss.config) # XXX with burble(): dtype = dtypes.ss.register_new( "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;" @@ -41,6 +64,7 @@ def test_jit_udt(): def test_jit_unary(v): + print(gb.ss.config) # XXX cdef = "void square (float *z, float *x) { (*z) = (*x) * (*x) ; } ;" with burble(): square = unary.ss.register_new("square", cdef, "FP32", "FP32") From 8ffd6ba70cd48864d664db29c681dd3cd6929f0e Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 28 Jun 2023 12:50:49 -0500 Subject: [PATCH 21/40] Install c-compiler for JIT --- .github/workflows/test_and_build.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 3b0c88a60..2b55d35bd 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -265,7 +265,7 @@ jobs: echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli \ + $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ @@ -293,6 +293,12 @@ jobs: pip install --no-deps git+https://github.com/GraphBLAS/python-suitesparse-graphblas.git@main#egg=suitesparse-graphblas fi pip install --no-deps -e . 
+ - name: python-suitesparse-graphblas tests + run: | + # Don't use our conftest.py ; allow `test_print_jit_config` to fail if it doesn't exist + (cd .. + pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config + pytest -v --pyargs suitesparse_graphblas) - name: Unit tests run: | A=${{ needs.rngs.outputs.mapnumpy == 'A' || '' }} ; B=${{ needs.rngs.outputs.mapnumpy == 'B' || '' }} @@ -320,7 +326,6 @@ jobs: if [[ $H && $normal ]] ; then if [[ $macos ]] ; then echo " $vanilla" ; elif [[ $windows ]] ; then echo " $suitesparse" ; fi ; fi)$( \ if [[ $H && $bizarro ]] ; then if [[ $macos ]] ; then echo " $suitesparse" ; elif [[ $windows ]] ; then echo " $vanilla" ; fi ; fi) echo ${args} - (cd .. && pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config && pytest -v --pyargs suitesparse_graphblas) # Don't use our conftest.py set -x # echo on coverage run -m pytest --color=yes --randomly -v ${args} \ ${{ matrix.slowtask == 'pytest_normal' && '--runslow' || '' }} From 00d2e6baaa0c8b2fd8ae5158d4bb02a40c476c19 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 28 Jun 2023 13:46:34 -0500 Subject: [PATCH 22/40] More experimentation --- .github/workflows/test_and_build.yml | 2 +- graphblas/tests/test_ssjit.py | 52 +++++++++++++++++++++++----- 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 2b55d35bd..9a5cdde00 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -265,7 +265,7 @@ jobs: echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler \ + $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler cmake make \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 471680088..aadbdff05 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -1,4 +1,5 @@ import os +import sys import numpy as np import pytest @@ -23,16 +24,47 @@ def _setup_jit(): if "CONDA_PREFIX" not in os.environ: return conda_prefix = os.environ["CONDA_PREFIX"] - gb.ss.config["jit_c_cmake_libs"] = ( - f"m;dl;{conda_prefix}/x86_64-conda-linux-gnu/lib/libgomp.so;" - f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" - ) - gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" - gb.ss.config["jit_c_libraries"] = ( - f" -lm -ldl {conda_prefix}/x86_64-conda-linux-gnu/lib/libgomp.so " - f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" - ) gb.ss.config["jit_c_control"] = "on" + # gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" + if sys.platform == "linux": + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/x86_64-conda-linux-gnu-cc" + gb.ss.config["jit_c_compiler_flags"] = ( + "-march=nocona -mtune=haswell -ftree-vectorize -fPIC -fstack-protector-strong " + f"-fno-plt -O2 -ffunction-sections -pipe -isystem {conda_prefix}/include -Wundef " + "-std=c11 -lm -Wno-pragmas -fexcess-precision=fast -fcx-limited-range " + "-fno-math-errno -fwrapv -O3 -DNDEBUG -fopenmp 
-fPIC" + ) + gb.ss.config["jit_c_linker_flags"] = ( + "-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now " + "-Wl,--disable-new-dtags -Wl,--gc-sections -Wl,--allow-shlib-undefined " + f"-Wl,-rpath,{conda_prefix}/lib -Wl,-rpath-link,{conda_prefix}/lib " + f"-L{conda_prefix}/lib -shared" + ) + gb.ss.config["jit_c_libraries"] = ( + f"-lm -ldl {conda_prefix}/lib/libgomp.so " + f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" + ) + gb.ss.config["jit_c_cmake_libs"] = ( + f"m;dl;{conda_prefix}/lib/libgomp.so;" + f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" + ) + elif sys.platform == "win32": + pass + else: + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/x86_64-apple-darwin13.4.0-clang" + gb.ss.config["jit_c_compiler_flags"] = ( + "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " + f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT " + "-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch x86_64 -isysroot " + "/Applications/Xcode_13.2.1.app/Contents/Developer/Platforms/MacOSX.platform" + "/Developer/SDKs/MacOSX10.9.sdk" + ) + gb.ss.config["jit_c_linker_flags"] = ( + "-Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs " + f"-Wl,-rpath,{conda_prefix}/lib -L{conda_prefix}/lib -dynamiclib" + ) + gb.ss.config["jit_c_libraries"] = f"-lm -ldl {conda_prefix}/lib/libomp.dylib" + gb.ss.config["jit_c_cmake_libs"] = f"m;dl;{conda_prefix}/lib/libomp.dylib" @pytest.fixture @@ -42,6 +74,7 @@ def v(): @autocompute def test_jit_udt(): + print("sys.platform:", sys.platform) print(gb.ss.config) # XXX with burble(): dtype = dtypes.ss.register_new( @@ -64,6 +97,7 @@ def test_jit_udt(): def test_jit_unary(v): + print("sys.platform:", sys.platform) print(gb.ss.config) # XXX cdef = "void square (float *z, float *x) { (*z) = (*x) * (*x) ; } ;" with burble(): From 76dc46733f9ddbf95ce40a1e04487777f0ce91c7 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 28 Jun 2023 17:03:44 -0500 Subject: [PATCH 23/40] Better handle JIT UDT numpy and numba dtypes --- graphblas/core/ss/dtypes.py | 36 ++++++++++++++++++++++++++--------- graphblas/dtypes/_core.py | 2 +- graphblas/tests/test_ssjit.py | 27 +++++++++++++++++++++++++- 3 files changed, 54 insertions(+), 11 deletions(-) diff --git a/graphblas/core/ss/dtypes.py b/graphblas/core/ss/dtypes.py index 259276d8e..2c7ad6df6 100644 --- a/graphblas/core/ss/dtypes.py +++ b/graphblas/core/ss/dtypes.py @@ -1,16 +1,19 @@ +import numpy as np + from ... import backend, dtypes from ...exceptions import check_status_carg from .. import _has_numba, ffi, lib ffi_new = ffi.new if _has_numba: + import numba from cffi import FFI from numba.core.typing import cffi_utils jit_ffi = FFI() -def register_new(name, jit_c_definition): +def register_new(name, jit_c_definition, *, np_type=None): if backend != "suitesparse": raise RuntimeError( "`gb.dtypes.ss.register_new` invalid when not using 'suitesparse' backend" @@ -39,23 +42,38 @@ def register_new(name, jit_c_definition): check_status_carg(lib.GxB_Type_size(size_ptr, gb_obj[0]), "Type", gb_obj[0]) size = size_ptr[0] - if _has_numba: + save_np_type = True + if np_type is None and _has_numba and numba.__version__[:5] > "0.56.": jit_ffi.cdef(jit_c_definition) numba_type = cffi_utils.map_type(jit_ffi.typeof(name), use_record_dtype=True) np_type = numba_type.dtype if np_type.itemsize != size: # pragma: no cover - # TODO: Should we warn or raise? 
- numba_type = np_type = None + raise RuntimeError( + "Size of compiled user-defined type does not match size of inferred numpy type: " + f"{size} != {np_type.itemsize} != {size}.\n\n" + f"UDT C definition: {jit_c_definition}\n" + f"numpy dtype: {np_type}\n\n" + "To get around this, you may pass `np_type=` keyword argument." + ) else: - # Instead of None, should we make these e.g. np.dtype((np.uint8, size))`? - numba_type = np_type = None + if np_type is not None: + np_type = np.dtype(np_type) + else: + # Not an ideal numpy type, but minimally useful + np_type = np.dtype((np.uint8, size)) + save_np_type = False + if _has_numba: + numba_type = numba.typeof(np_type).dtype + else: + numba_type = None # For now, let's use "opaque" unsigned bytes for the c type. rv = dtypes._core.DataType(name, gb_obj, None, f"uint8_t[{size}]", numba_type, np_type) dtypes._core._registry[gb_obj] = rv - if _has_numba: + if save_np_type or np_type not in dtypes._core._registry: dtypes._core._registry[np_type] = rv - dtypes._core._registry[numba_type] = rv - dtypes._core._registry[numba_type.name] = rv + if numba_type is not None and (save_np_type or numba_type not in dtypes._core._registry): + dtypes._core._registry[numba_type] = rv + dtypes._core._registry[numba_type.name] = rv setattr(dtypes.ss, name, rv) return rv diff --git a/graphblas/dtypes/_core.py b/graphblas/dtypes/_core.py index 345c1be81..30ac6984c 100644 --- a/graphblas/dtypes/_core.py +++ b/graphblas/dtypes/_core.py @@ -22,7 +22,7 @@ def __init__(self, name, gb_obj, gb_name, c_type, numba_type, np_type): self.gb_name = gb_name self.c_type = c_type self.numba_type = numba_type - self.np_type = np.dtype(np_type) + self.np_type = np.dtype(np_type) if np_type is not None else np_type def __repr__(self): return self.name diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index aadbdff05..788c08942 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -13,6 +13,11 @@ from graphblas import Vector # isort:skip (for dask-graphblas) +try: + import numba +except ImportError: + numba = None + if backend != "suitesparse": pytest.skip("not suitesparse backend", allow_module_level=True) if gb.ss.about["library_version"][0] < 8: @@ -85,8 +90,20 @@ def test_jit_udt(): assert dtype.name == "myquaternion" assert str(dtype) == "myquaternion" assert dtype.gb_name is None - assert dtype.np_type == np.dtype([("x", " Date: Wed, 28 Jun 2023 17:05:34 -0500 Subject: [PATCH 24/40] Try this --- graphblas/tests/test_ssjit.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 788c08942..2dc4b6847 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -56,7 +56,8 @@ def _setup_jit(): elif sys.platform == "win32": pass else: - gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/x86_64-apple-darwin13.4.0-clang" + # gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/x86_64-apple-darwin13.4.0-clang" + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" gb.ss.config["jit_c_compiler_flags"] = ( "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT " From 07632535e9a5db4ba21abbd0a6b80ad1291c43a0 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 28 Jun 2023 21:27:27 -0500 Subject: [PATCH 25/40] skip ssjit tests on macos for now --- graphblas/tests/test_ssjit.py | 20 ++++++++++---------- 1 file changed, 10 
insertions(+), 10 deletions(-) diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 2dc4b6847..d67cd04ab 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -22,15 +22,18 @@ pytest.skip("not suitesparse backend", allow_module_level=True) if gb.ss.about["library_version"][0] < 8: pytest.skip("not SuiteSparse:GraphBLAS >=8", allow_module_level=True) +if sys.platform == "darwin": + pytest.skip("SuiteSparse JIT tests not yet working on macos", allow_module_level=True) @pytest.fixture(scope="module", autouse=True) def _setup_jit(): + # Configuration values below were obtained from the output of the JIT config + # in CI, but with paths changed to use `{conda_prefix}` where appropriate. if "CONDA_PREFIX" not in os.environ: return conda_prefix = os.environ["CONDA_PREFIX"] gb.ss.config["jit_c_control"] = "on" - # gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" if sys.platform == "linux": gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/x86_64-conda-linux-gnu-cc" gb.ss.config["jit_c_compiler_flags"] = ( @@ -53,11 +56,10 @@ def _setup_jit(): f"m;dl;{conda_prefix}/lib/libgomp.so;" f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" ) - elif sys.platform == "win32": - pass - else: - # gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/x86_64-apple-darwin13.4.0-clang" - gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" + elif sys.platform == "darwin": # pragma: no cover + # This is not yet working in CI + # gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/x86_64-apple-darwin13.4.0-clang" gb.ss.config["jit_c_compiler_flags"] = ( "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT " @@ -71,6 +73,8 @@ def _setup_jit(): ) gb.ss.config["jit_c_libraries"] = f"-lm -ldl {conda_prefix}/lib/libomp.dylib" gb.ss.config["jit_c_cmake_libs"] = f"m;dl;{conda_prefix}/lib/libomp.dylib" + elif sys.platform == "win32": + pass @pytest.fixture @@ -80,8 +84,6 @@ def v(): @autocompute def test_jit_udt(): - print("sys.platform:", sys.platform) - print(gb.ss.config) # XXX with burble(): dtype = dtypes.ss.register_new( "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;" @@ -123,8 +125,6 @@ def test_jit_udt(): def test_jit_unary(v): - print("sys.platform:", sys.platform) - print(gb.ss.config) # XXX cdef = "void square (float *z, float *x) { (*z) = (*x) * (*x) ; } ;" with burble(): square = unary.ss.register_new("square", cdef, "FP32", "FP32") From d7a9460338f83a4f7cc23b72e10cdfbdf12fea2a Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Thu, 29 Jun 2023 12:23:22 -0500 Subject: [PATCH 26/40] fix `ss.dtypes` --- graphblas/core/ss/dtypes.py | 18 +++++++++--------- scripts/check_versions.sh | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/graphblas/core/ss/dtypes.py b/graphblas/core/ss/dtypes.py index 2c7ad6df6..14f7128f5 100644 --- a/graphblas/core/ss/dtypes.py +++ b/graphblas/core/ss/dtypes.py @@ -1,6 +1,6 @@ import numpy as np -from ... import backend, dtypes +from ... import backend, core, dtypes from ...exceptions import check_status_carg from .. 
import _has_numba, ffi, lib @@ -20,7 +20,7 @@ def register_new(name, jit_c_definition, *, np_type=None): ) if not name.isidentifier(): raise ValueError(f"`name` argument must be a valid Python identifier; got: {name!r}") - if name in dtypes._core._registry or hasattr(dtypes.ss, name): + if name in core.dtypes._registry or hasattr(dtypes.ss, name): raise ValueError(f"{name!r} name for dtype is unavailable") if len(name) > lib.GxB_MAX_NAME_LEN: raise ValueError( @@ -68,12 +68,12 @@ def register_new(name, jit_c_definition, *, np_type=None): numba_type = None # For now, let's use "opaque" unsigned bytes for the c type. - rv = dtypes._core.DataType(name, gb_obj, None, f"uint8_t[{size}]", numba_type, np_type) - dtypes._core._registry[gb_obj] = rv - if save_np_type or np_type not in dtypes._core._registry: - dtypes._core._registry[np_type] = rv - if numba_type is not None and (save_np_type or numba_type not in dtypes._core._registry): - dtypes._core._registry[numba_type] = rv - dtypes._core._registry[numba_type.name] = rv + rv = core.dtypes.DataType(name, gb_obj, None, f"uint8_t[{size}]", numba_type, np_type) + core.dtypes._registry[gb_obj] = rv + if save_np_type or np_type not in core.dtypes._registry: + core.dtypes._registry[np_type] = rv + if numba_type is not None and (save_np_type or numba_type not in core.dtypes._registry): + core.dtypes._registry[numba_type] = rv + core.dtypes._registry[numba_type.name] = rv setattr(dtypes.ss, name, rv) return rv diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index 22f0b3cca..b6622a56e 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -4,8 +4,8 @@ # This may be helpful when updating dependency versions in CI. # Tip: add `--json` for more information. conda search 'numpy[channel=conda-forge]>=1.25.0' -conda search 'pandas[channel=conda-forge]>=2.0.2' -conda search 'scipy[channel=conda-forge]>=1.11.0' +conda search 'pandas[channel=conda-forge]>=2.0.3' +conda search 'scipy[channel=conda-forge]>=1.11.1' conda search 'networkx[channel=conda-forge]>=3.1' conda search 'awkward[channel=conda-forge]>=2.2.4' conda search 'sparse[channel=conda-forge]>=0.14.0' From 8c3692adb588a0fcc3dbfb15ec6fc7e264c27ae0 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 1 Jul 2023 16:45:40 -0500 Subject: [PATCH 27/40] Run with SuiteSparse 7 and 8 --- .github/workflows/imports.yml | 2 +- graphblas/core/dtypes.py | 2 +- graphblas/core/ss/__init__.py | 3 ++ graphblas/core/ss/context.py | 9 +++++ graphblas/core/ss/descriptor.py | 25 ++++++++++--- graphblas/core/ss/dtypes.py | 9 +++++ graphblas/core/ss/indexunary.py | 9 +++++ graphblas/core/ss/select.py | 9 +++++ graphblas/core/ss/unary.py | 9 +++++ graphblas/dtypes/ss.py | 5 ++- graphblas/indexunary/ss.py | 5 ++- graphblas/select/ss.py | 5 ++- graphblas/ss/__init__.py | 7 +++- graphblas/ss/_core.py | 66 +++++++++++++++++++++++---------- graphblas/unary/ss.py | 5 ++- 15 files changed, 137 insertions(+), 33 deletions(-) diff --git a/.github/workflows/imports.yml b/.github/workflows/imports.yml index 82ca21a56..18e6f637c 100644 --- a/.github/workflows/imports.yml +++ b/.github/workflows/imports.yml @@ -54,7 +54,7 @@ jobs: python-version: ${{ needs.rngs.outputs.pyver }} # python-version: ${{ matrix.python-version }} - run: python -m pip install --upgrade pip - - run: pip install --pre suitesparse-graphblas # Use if we need pre-release + # - run: pip install --pre suitesparse-graphblas # Use if we need pre-release - run: pip install -e .[default] - name: Run test imports run: 
./scripts/test_imports.sh diff --git a/graphblas/core/dtypes.py b/graphblas/core/dtypes.py index 30ac6984c..d7a83c99b 100644 --- a/graphblas/core/dtypes.py +++ b/graphblas/core/dtypes.py @@ -22,7 +22,7 @@ def __init__(self, name, gb_obj, gb_name, c_type, numba_type, np_type): self.gb_name = gb_name self.c_type = c_type self.numba_type = numba_type - self.np_type = np.dtype(np_type) if np_type is not None else np_type + self.np_type = np.dtype(np_type) if np_type is not None else None def __repr__(self): return self.name diff --git a/graphblas/core/ss/__init__.py b/graphblas/core/ss/__init__.py index e69de29bb..c2e83ddcc 100644 --- a/graphblas/core/ss/__init__.py +++ b/graphblas/core/ss/__init__.py @@ -0,0 +1,3 @@ +import suitesparse_graphblas as _ssgb + +_IS_SSGB7 = _ssgb.__version__.split(".", 1)[0] == "7" diff --git a/graphblas/core/ss/context.py b/graphblas/core/ss/context.py index 46921a60f..9b48bcaa4 100644 --- a/graphblas/core/ss/context.py +++ b/graphblas/core/ss/context.py @@ -2,9 +2,18 @@ from ...exceptions import InvalidValue, check_status, check_status_carg from .. import ffi, lib +from . import _IS_SSGB7 from .config import BaseConfig ffi_new = ffi.new +if _IS_SSGB7: + # Context was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise ImportError( + "Context was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) class Context(BaseConfig): diff --git a/graphblas/core/ss/descriptor.py b/graphblas/core/ss/descriptor.py index d09842996..52c43b95d 100644 --- a/graphblas/core/ss/descriptor.py +++ b/graphblas/core/ss/descriptor.py @@ -1,6 +1,7 @@ from ...exceptions import check_status, check_status_carg from .. import ffi, lib from ..descriptor import Descriptor +from . import _IS_SSGB7 from .config import BaseConfig ffi_new = ffi.new @@ -18,7 +19,8 @@ class _DescriptorConfig(BaseConfig): _get_function = "GxB_Desc_get" _set_function = "GxB_Desc_set" - _context_keys = {"chunk", "gpu_id", "nthreads"} + if not _IS_SSGB7: + _context_keys = {"chunk", "gpu_id", "nthreads"} _options = { # GrB "output_replace": (lib.GrB_OUTP, "GrB_Desc_Value"), @@ -27,13 +29,25 @@ class _DescriptorConfig(BaseConfig): "transpose_first": (lib.GrB_INP0, "GrB_Desc_Value"), "transpose_second": (lib.GrB_INP1, "GrB_Desc_Value"), # GxB - "chunk": (lib.GxB_CONTEXT_CHUNK, "double"), - "gpu_id": (lib.GxB_CONTEXT_GPU_ID, "int"), - "nthreads": (lib.GxB_CONTEXT_NTHREADS, "int"), "axb_method": (lib.GxB_AxB_METHOD, "GrB_Desc_Value"), "sort": (lib.GxB_SORT, "int"), "secure_import": (lib.GxB_IMPORT, "int"), } + if _IS_SSGB7: + _options.update( + { + "nthreads": (lib.GxB_DESCRIPTOR_NTHREADS, "int"), + "chunk": (lib.GxB_DESCRIPTOR_CHUNK, "double"), + } + ) + else: + _options.update( + { + "chunk": (lib.GxB_CONTEXT_CHUNK, "double"), + "gpu_id": (lib.GxB_CONTEXT_GPU_ID, "int"), + "nthreads": (lib.GxB_CONTEXT_NTHREADS, "int"), + } + ) _enumerations = { # GrB "output_replace": { @@ -86,8 +100,9 @@ class _DescriptorConfig(BaseConfig): "axb_method": "default", "sort": False, "secure_import": False, - "gpu_id": -1, # -1 means no GPU } + if not _IS_SSGB7: + _defaults["gpu_id"] = -1 def __init__(self): gb_obj = ffi_new("GrB_Descriptor*") diff --git a/graphblas/core/ss/dtypes.py b/graphblas/core/ss/dtypes.py index 14f7128f5..ad13364e2 100644 --- a/graphblas/core/ss/dtypes.py +++ b/graphblas/core/ss/dtypes.py @@ -3,8 +3,17 @@ from ... import backend, core, dtypes from ...exceptions import check_status_carg from .. import _has_numba, ffi, lib +from . 
import _IS_SSGB7 ffi_new = ffi.new +if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise ImportError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) if _has_numba: import numba from cffi import FFI diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py index ceab64e16..cbc01f20b 100644 --- a/graphblas/core/ss/indexunary.py +++ b/graphblas/core/ss/indexunary.py @@ -4,8 +4,17 @@ from .. import NULL, ffi, lib from ..operator.base import TypedOpBase from ..operator.indexunary import IndexUnaryOp, TypedUserIndexUnaryOp +from . import _IS_SSGB7 ffi_new = ffi.new +if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise ImportError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) class TypedJitIndexUnaryOp(TypedOpBase): diff --git a/graphblas/core/ss/select.py b/graphblas/core/ss/select.py index ee9dfb004..4a82791ba 100644 --- a/graphblas/core/ss/select.py +++ b/graphblas/core/ss/select.py @@ -3,8 +3,17 @@ from .. import ffi from ..operator.base import TypedOpBase from ..operator.select import SelectOp, TypedUserSelectOp +from . import _IS_SSGB7 ffi_new = ffi.new +if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise ImportError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) class TypedJitSelectOp(TypedOpBase): diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py index 72826ff43..577fcffc4 100644 --- a/graphblas/core/ss/unary.py +++ b/graphblas/core/ss/unary.py @@ -4,8 +4,17 @@ from .. import NULL, ffi, lib from ..operator.base import TypedOpBase from ..operator.unary import TypedUserUnaryOp, UnaryOp +from . 
import _IS_SSGB7 ffi_new = ffi.new +if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise ImportError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) class TypedJitUnaryOp(TypedOpBase): diff --git a/graphblas/dtypes/ss.py b/graphblas/dtypes/ss.py index 9f6083e01..d8c8b6b09 100644 --- a/graphblas/dtypes/ss.py +++ b/graphblas/dtypes/ss.py @@ -1 +1,4 @@ -from ..core.ss.dtypes import register_new # noqa: F401 +from ..core.ss import _IS_SSGB7 + +if not _IS_SSGB7: + from ..core.ss.dtypes import register_new # noqa: F401 diff --git a/graphblas/indexunary/ss.py b/graphblas/indexunary/ss.py index 58218df6f..47c906de5 100644 --- a/graphblas/indexunary/ss.py +++ b/graphblas/indexunary/ss.py @@ -1,5 +1,8 @@ from ..core import operator -from ..core.ss.indexunary import register_new # noqa: F401 +from ..core.ss import _IS_SSGB7 + +if not _IS_SSGB7: + from ..core.ss.indexunary import register_new # noqa: F401 _delayed = {} diff --git a/graphblas/select/ss.py b/graphblas/select/ss.py index 173067382..ed7cfbe9a 100644 --- a/graphblas/select/ss.py +++ b/graphblas/select/ss.py @@ -1,5 +1,8 @@ from ..core import operator -from ..core.ss.select import register_new # noqa: F401 +from ..core.ss import _IS_SSGB7 + +if not _IS_SSGB7: + from ..core.ss.select import register_new # noqa: F401 _delayed = {} diff --git a/graphblas/ss/__init__.py b/graphblas/ss/__init__.py index 25025a134..b723d9cb8 100644 --- a/graphblas/ss/__init__.py +++ b/graphblas/ss/__init__.py @@ -1,2 +1,5 @@ -from ..core.ss.context import Context, global_context -from ._core import about, concat, config, diag +from ._core import _IS_SSGB7, about, concat, config, diag + +if not _IS_SSGB7: + # Context was introduced in SuiteSparse:GraphBLAS 8.0 + from ..core.ss.context import Context, global_context diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index f05c05f6b..b4ab63318 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -5,8 +5,8 @@ from ..core.descriptor import lookup as descriptor_lookup from ..core.matrix import Matrix, TransposedMatrix from ..core.scalar import _as_scalar +from ..core.ss import _IS_SSGB7 from ..core.ss.config import BaseConfig -from ..core.ss.context import global_context from ..core.ss.matrix import _concat_mn from ..core.vector import Vector from ..dtypes import INT64 @@ -135,7 +135,8 @@ class GlobalConfig(BaseConfig): _get_function = "GxB_Global_Option_get" _set_function = "GxB_Global_Option_set" - _context_keys = {"chunk", "gpu_id", "nthreads"} + if not _IS_SSGB7: + _context_keys = {"chunk", "gpu_id", "nthreads"} _null_valid = {"bitmap_switch"} _options = { # Matrix/Vector format @@ -150,20 +151,32 @@ class GlobalConfig(BaseConfig): # Diagnostics (skipping "printf" and "flush" for now) "burble": (lib.GxB_BURBLE, "bool"), "print_1based": (lib.GxB_PRINT_1BASED, "bool"), - # JIT control - "jit_c_control": (lib.GxB_JIT_C_CONTROL, "int"), - "jit_use_cmake": (lib.GxB_JIT_USE_CMAKE, "bool"), - "jit_c_compiler_name": (lib.GxB_JIT_C_COMPILER_NAME, "char*"), - "jit_c_compiler_flags": (lib.GxB_JIT_C_COMPILER_FLAGS, "char*"), - "jit_c_linker_flags": (lib.GxB_JIT_C_LINKER_FLAGS, "char*"), - "jit_c_libraries": (lib.GxB_JIT_C_LIBRARIES, "char*"), - "jit_c_cmake_libs": (lib.GxB_JIT_C_CMAKE_LIBS, "char*"), - "jit_c_preface": (lib.GxB_JIT_C_PREFACE, "char*"), - "jit_error_log": (lib.GxB_JIT_ERROR_LOG, "char*"), - "jit_cache_path": (lib.GxB_JIT_CACHE_PATH, "char*"), - # CUDA GPU control - 
"gpu_id": (lib.GxB_GLOBAL_GPU_ID, "int"), } + if _IS_SSGB7: + _options.update( + { + "gpu_control": (lib.GxB_GLOBAL_GPU_CONTROL, "GrB_Desc_Value"), + "gpu_chunk": (lib.GxB_GLOBAL_GPU_CHUNK, "double"), + } + ) + else: + _options.update( + { + # JIT control + "jit_c_control": (lib.GxB_JIT_C_CONTROL, "int"), + "jit_use_cmake": (lib.GxB_JIT_USE_CMAKE, "bool"), + "jit_c_compiler_name": (lib.GxB_JIT_C_COMPILER_NAME, "char*"), + "jit_c_compiler_flags": (lib.GxB_JIT_C_COMPILER_FLAGS, "char*"), + "jit_c_linker_flags": (lib.GxB_JIT_C_LINKER_FLAGS, "char*"), + "jit_c_libraries": (lib.GxB_JIT_C_LIBRARIES, "char*"), + "jit_c_cmake_libs": (lib.GxB_JIT_C_CMAKE_LIBS, "char*"), + "jit_c_preface": (lib.GxB_JIT_C_PREFACE, "char*"), + "jit_error_log": (lib.GxB_JIT_ERROR_LOG, "char*"), + "jit_cache_path": (lib.GxB_JIT_CACHE_PATH, "char*"), + # CUDA GPU control + "gpu_id": (lib.GxB_GLOBAL_GPU_ID, "int"), + } + ) # Values to restore defaults _defaults = { "hyper_switch": lib.GxB_HYPER_DEFAULT, @@ -173,22 +186,29 @@ class GlobalConfig(BaseConfig): "chunk": 0, "burble": 0, "print_1based": 0, - "gpu_id": -1, # -1 means no GPU } + if not _IS_SSGB7: + _defaults["gpu_id"] = -1 # -1 means no GPU _enumerations = { "format": { "by_row": lib.GxB_BY_ROW, "by_col": lib.GxB_BY_COL, # "no_format": lib.GxB_NO_FORMAT, # Used by iterators; not valid here }, - "jit_c_control": { + } + if _IS_SSGB7: + _enumerations["gpu_control"] = { + "always": lib.GxB_GPU_ALWAYS, + "never": lib.GxB_GPU_NEVER, + } + else: + _enumerations["jit_c_control"] = { "off": lib.GxB_JIT_OFF, "pause": lib.GxB_JIT_PAUSE, "run": lib.GxB_JIT_RUN, "load": lib.GxB_JIT_LOAD, "on": lib.GxB_JIT_ON, - }, - } + } class About(Mapping): @@ -275,4 +295,10 @@ def __len__(self): about = About() -config = GlobalConfig(context=global_context) +if _IS_SSGB7: + config = GlobalConfig() +else: + # Context was introduced in SuiteSparse:GraphBLAS 8.0 + from ..core.ss.context import global_context + + config = GlobalConfig(context=global_context) diff --git a/graphblas/unary/ss.py b/graphblas/unary/ss.py index e97784612..9995e8c4e 100644 --- a/graphblas/unary/ss.py +++ b/graphblas/unary/ss.py @@ -1,5 +1,8 @@ from ..core import operator -from ..core.ss.unary import register_new # noqa: F401 +from ..core.ss import _IS_SSGB7 + +if not _IS_SSGB7: + from ..core.ss.unary import register_new # noqa: F401 _delayed = {} From 9a9d24ba9bc97a7cb3ec3fa6ccac7602bb2786ad Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sat, 1 Jul 2023 23:49:18 -0500 Subject: [PATCH 28/40] Enable SS 7 in CI; and a bit more coverage --- .github/workflows/test_and_build.yml | 14 ++++------ .pre-commit-config.yaml | 2 +- graphblas/core/ss/binary.py | 14 ++++++++-- graphblas/core/ss/dtypes.py | 18 ++++++------ graphblas/core/ss/indexunary.py | 21 +++++++------- graphblas/core/ss/select.py | 21 +++++++------- graphblas/core/ss/unary.py | 21 +++++++------- graphblas/dtypes/ss.py | 5 +--- graphblas/indexunary/ss.py | 5 +--- graphblas/select/ss.py | 5 +--- graphblas/tests/test_ssjit.py | 42 ++++++++++++++++++++++++---- graphblas/unary/ss.py | 5 +--- 12 files changed, 98 insertions(+), 75 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 9a5cdde00..c24068d7b 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -134,7 +134,7 @@ jobs: 1 1 1 - 10000000 + 1 - name: Setup mamba uses: conda-incubator/setup-miniconda@v2 id: setup_mamba @@ -204,15 +204,13 @@ jobs: # But, it's still useful for us to test with different versions! 
psg="" if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["=8.0.1.0", ""]))') - # psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.1.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", ""]))') psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then - psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2"]))') + psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", ""]))') elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions - psgver=$(python -c 'import random ; print(random.choice(["==8.0.1.0", ""]))') - # psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.1.0", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", ""]))') else psgver="" fi @@ -265,13 +263,13 @@ jobs: echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler cmake make \ + $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler cmake make m4 \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7"' || '' }} \ ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ - ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=8.0.1"' || '' }} \ + ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4"' || '' }} \ ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} - name: Build extension module run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f0ca307e8..39567e4f1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: isort # Let's keep `pyupgrade` even though `ruff --fix` probably does most of it - repo: https://github.com/asottile/pyupgrade - rev: v3.7.0 + rev: v3.8.0 hooks: - id: pyupgrade args: [--py38-plus] diff --git a/graphblas/core/ss/binary.py b/graphblas/core/ss/binary.py index 3d8983244..898257fac 100644 --- a/graphblas/core/ss/binary.py +++ b/graphblas/core/ss/binary.py @@ -4,6 +4,7 @@ from .. import NULL, ffi, lib from ..operator.base import TypedOpBase from ..operator.binary import BinaryOp, TypedUserBinaryOp +from . 
import _IS_SSGB7 ffi_new = ffi.new @@ -30,15 +31,22 @@ def jit_c_definition(self): def register_new(name, jit_c_definition, left_type, right_type, ret_type): - if backend != "suitesparse": + if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( "`gb.binary.ss.register_new` invalid when not using 'suitesparse' backend" ) + if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise RuntimeError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) left_type = lookup_dtype(left_type) right_type = lookup_dtype(right_type) ret_type = lookup_dtype(ret_type) - if not name.startswith("ss."): - name = f"ss.{name}" + name = name if name.startswith("ss.") else f"ss.{name}" module, funcname = BinaryOp._remove_nesting(name) rv = BinaryOp(name) diff --git a/graphblas/core/ss/dtypes.py b/graphblas/core/ss/dtypes.py index ad13364e2..d2eb5b416 100644 --- a/graphblas/core/ss/dtypes.py +++ b/graphblas/core/ss/dtypes.py @@ -6,14 +6,6 @@ from . import _IS_SSGB7 ffi_new = ffi.new -if _IS_SSGB7: - # JIT was introduced in SuiteSparse:GraphBLAS 8.0 - import suitesparse_graphblas as ssgb - - raise ImportError( - "JIT was added to SuiteSparse:GraphBLAS in version 8; " - f"current version is {ssgb.__version__}" - ) if _has_numba: import numba from cffi import FFI @@ -23,10 +15,18 @@ def register_new(name, jit_c_definition, *, np_type=None): - if backend != "suitesparse": + if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( "`gb.dtypes.ss.register_new` invalid when not using 'suitesparse' backend" ) + if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise RuntimeError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) if not name.isidentifier(): raise ValueError(f"`name` argument must be a valid Python identifier; got: {name!r}") if name in core.dtypes._registry or hasattr(dtypes.ss, name): diff --git a/graphblas/core/ss/indexunary.py b/graphblas/core/ss/indexunary.py index cbc01f20b..c0f185737 100644 --- a/graphblas/core/ss/indexunary.py +++ b/graphblas/core/ss/indexunary.py @@ -7,14 +7,6 @@ from . 
import _IS_SSGB7 ffi_new = ffi.new -if _IS_SSGB7: - # JIT was introduced in SuiteSparse:GraphBLAS 8.0 - import suitesparse_graphblas as ssgb - - raise ImportError( - "JIT was added to SuiteSparse:GraphBLAS in version 8; " - f"current version is {ssgb.__version__}" - ) class TypedJitIndexUnaryOp(TypedOpBase): @@ -33,15 +25,22 @@ def jit_c_definition(self): def register_new(name, jit_c_definition, input_type, thunk_type, ret_type): - if backend != "suitesparse": + if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( "`gb.indexunary.ss.register_new` invalid when not using 'suitesparse' backend" ) + if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise RuntimeError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) input_type = lookup_dtype(input_type) thunk_type = lookup_dtype(thunk_type) ret_type = lookup_dtype(ret_type) - if not name.startswith("ss."): - name = f"ss.{name}" + name = name if name.startswith("ss.") else f"ss.{name}" module, funcname = IndexUnaryOp._remove_nesting(name) rv = IndexUnaryOp(name) diff --git a/graphblas/core/ss/select.py b/graphblas/core/ss/select.py index 4a82791ba..37c352b67 100644 --- a/graphblas/core/ss/select.py +++ b/graphblas/core/ss/select.py @@ -6,14 +6,6 @@ from . import _IS_SSGB7 ffi_new = ffi.new -if _IS_SSGB7: - # JIT was introduced in SuiteSparse:GraphBLAS 8.0 - import suitesparse_graphblas as ssgb - - raise ImportError( - "JIT was added to SuiteSparse:GraphBLAS in version 8; " - f"current version is {ssgb.__version__}" - ) class TypedJitSelectOp(TypedOpBase): @@ -32,14 +24,21 @@ def jit_c_definition(self): def register_new(name, jit_c_definition, input_type, thunk_type): - if backend != "suitesparse": + if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( "`gb.select.ss.register_new` invalid when not using 'suitesparse' backend" ) + if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise RuntimeError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) input_type = lookup_dtype(input_type) thunk_type = lookup_dtype(thunk_type) - if not name.startswith("ss."): - name = f"ss.{name}" + name = name if name.startswith("ss.") else f"ss.{name}" # Register to both `gb.indexunary.ss` and `gb.select.ss.` indexunary.ss.register_new(name, jit_c_definition, input_type, thunk_type, BOOL) module, funcname = SelectOp._remove_nesting(name, strict=False) diff --git a/graphblas/core/ss/unary.py b/graphblas/core/ss/unary.py index 577fcffc4..97c4614c0 100644 --- a/graphblas/core/ss/unary.py +++ b/graphblas/core/ss/unary.py @@ -7,14 +7,6 @@ from . 
import _IS_SSGB7 ffi_new = ffi.new -if _IS_SSGB7: - # JIT was introduced in SuiteSparse:GraphBLAS 8.0 - import suitesparse_graphblas as ssgb - - raise ImportError( - "JIT was added to SuiteSparse:GraphBLAS in version 8; " - f"current version is {ssgb.__version__}" - ) class TypedJitUnaryOp(TypedOpBase): @@ -33,14 +25,21 @@ def jit_c_definition(self): def register_new(name, jit_c_definition, input_type, ret_type): - if backend != "suitesparse": + if backend != "suitesparse": # pragma: no cover (safety) raise RuntimeError( "`gb.unary.ss.register_new` invalid when not using 'suitesparse' backend" ) + if _IS_SSGB7: + # JIT was introduced in SuiteSparse:GraphBLAS 8.0 + import suitesparse_graphblas as ssgb + + raise RuntimeError( + "JIT was added to SuiteSparse:GraphBLAS in version 8; " + f"current version is {ssgb.__version__}" + ) input_type = lookup_dtype(input_type) ret_type = lookup_dtype(ret_type) - if not name.startswith("ss."): - name = f"ss.{name}" + name = name if name.startswith("ss.") else f"ss.{name}" module, funcname = UnaryOp._remove_nesting(name) rv = UnaryOp(name) diff --git a/graphblas/dtypes/ss.py b/graphblas/dtypes/ss.py index d8c8b6b09..9f6083e01 100644 --- a/graphblas/dtypes/ss.py +++ b/graphblas/dtypes/ss.py @@ -1,4 +1 @@ -from ..core.ss import _IS_SSGB7 - -if not _IS_SSGB7: - from ..core.ss.dtypes import register_new # noqa: F401 +from ..core.ss.dtypes import register_new # noqa: F401 diff --git a/graphblas/indexunary/ss.py b/graphblas/indexunary/ss.py index 47c906de5..58218df6f 100644 --- a/graphblas/indexunary/ss.py +++ b/graphblas/indexunary/ss.py @@ -1,8 +1,5 @@ from ..core import operator -from ..core.ss import _IS_SSGB7 - -if not _IS_SSGB7: - from ..core.ss.indexunary import register_new # noqa: F401 +from ..core.ss.indexunary import register_new # noqa: F401 _delayed = {} diff --git a/graphblas/select/ss.py b/graphblas/select/ss.py index ed7cfbe9a..173067382 100644 --- a/graphblas/select/ss.py +++ b/graphblas/select/ss.py @@ -1,8 +1,5 @@ from ..core import operator -from ..core.ss import _IS_SSGB7 - -if not _IS_SSGB7: - from ..core.ss.select import register_new # noqa: F401 +from ..core.ss.select import register_new # noqa: F401 _delayed = {} diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index d67cd04ab..54b1ec647 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -8,6 +8,7 @@ import graphblas as gb from graphblas import backend, binary, dtypes, indexunary, select, unary from graphblas.core import _supports_udfs as supports_udfs +from graphblas.core.ss import _IS_SSGB7 from .conftest import autocompute, burble @@ -20,8 +21,6 @@ if backend != "suitesparse": pytest.skip("not suitesparse backend", allow_module_level=True) -if gb.ss.about["library_version"][0] < 8: - pytest.skip("not SuiteSparse:GraphBLAS >=8", allow_module_level=True) if sys.platform == "darwin": pytest.skip("SuiteSparse JIT tests not yet working on macos", allow_module_level=True) @@ -30,7 +29,7 @@ def _setup_jit(): # Configuration values below were obtained from the output of the JIT config # in CI, but with paths changed to use `{conda_prefix}` where appropriate. 
- if "CONDA_PREFIX" not in os.environ: + if "CONDA_PREFIX" not in os.environ or _IS_SSGB7: return conda_prefix = os.environ["CONDA_PREFIX"] gb.ss.config["jit_c_control"] = "on" @@ -73,7 +72,7 @@ def _setup_jit(): ) gb.ss.config["jit_c_libraries"] = f"-lm -ldl {conda_prefix}/lib/libomp.dylib" gb.ss.config["jit_c_cmake_libs"] = f"m;dl;{conda_prefix}/lib/libomp.dylib" - elif sys.platform == "win32": + elif sys.platform == "win32": # pragma: no branch (sanity) pass @@ -84,6 +83,12 @@ def v(): @autocompute def test_jit_udt(): + if _IS_SSGB7: + with pytest.raises(RuntimeError, match="JIT was added"): + dtypes.ss.register_new( + "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;" + ) + return with burble(): dtype = dtypes.ss.register_new( "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;" @@ -113,7 +118,7 @@ def test_jit_udt(): v[1] = (2, 3) if supports_udfs: expected = Vector.from_dense([100, 3]) - assert expected.isequal(v.apply(lambda x: x["color"])) + assert expected.isequal(v.apply(lambda x: x["color"])) # pragma: no cover (numba) np_type = np.dtype([("x", " Date: Sun, 2 Jul 2023 00:05:08 -0500 Subject: [PATCH 29/40] Try this --- .github/workflows/test_and_build.yml | 7 ++++--- graphblas/tests/test_ssjit.py | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index c24068d7b..38a2fe6b6 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -263,14 +263,15 @@ jobs: echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler cmake make m4 \ + $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler cmake make \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ ${{ matrix.slowtask == 'notebooks' && 'matplotlib nbconvert jupyter "ipython>=7"' || '' }} \ ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4"' || '' }} \ - ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} + ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} \ + ${{ matrix.os != 'windows-latest' && 'm4' || '' }} - name: Build extension module run: | if [[ ${{ steps.sourcetype.outputs.selected }} == "wheel" ]]; then @@ -295,7 +296,7 @@ jobs: run: | # Don't use our conftest.py ; allow `test_print_jit_config` to fail if it doesn't exist (cd .. 
- pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config + pytest --pyargs suitesparse_graphblas -s -k test_print_jit_config || true pytest -v --pyargs suitesparse_graphblas) - name: Unit tests run: | diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 54b1ec647..51b248a79 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -21,8 +21,8 @@ if backend != "suitesparse": pytest.skip("not suitesparse backend", allow_module_level=True) -if sys.platform == "darwin": - pytest.skip("SuiteSparse JIT tests not yet working on macos", allow_module_level=True) +# if sys.platform == "darwin": +# pytest.skip("SuiteSparse JIT tests not yet working on macos", allow_module_level=True) @pytest.fixture(scope="module", autouse=True) From 1add974ece1b0f90ce66049ba574184fb3d6f18e Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 2 Jul 2023 00:17:52 -0500 Subject: [PATCH 30/40] Still experimenting --- .github/workflows/test_and_build.yml | 2 +- graphblas/tests/test_ssjit.py | 4 ++-- pyproject.toml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 38a2fe6b6..aa517c5fe 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -263,7 +263,7 @@ jobs: echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler cmake make \ + $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler cxx-compiler cmake make \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 51b248a79..84c852a9c 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -57,8 +57,8 @@ def _setup_jit(): ) elif sys.platform == "darwin": # pragma: no cover # This is not yet working in CI - # gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" - gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/x86_64-apple-darwin13.4.0-clang" + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" + # gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/x86_64-apple-darwin13.4.0-clang" gb.ss.config["jit_c_compiler_flags"] = ( "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT " diff --git a/pyproject.toml b/pyproject.toml index 7755a132b..bd72e9fc7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -171,7 +171,7 @@ line_length = 100 [tool.pytest.ini_options] minversion = "6.0" testpaths = "graphblas/tests" -xfail_strict = true +xfail_strict = false # 2023-07-23: awkward and numpy 1.25 sometimes conflict addopts = [ "--strict-config", # Force error if config is mispelled "--strict-markers", # Force error if marker is mispelled (must be defined in config) From 1109d02db56cad0f3d6188aff327c387f6150daf Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 2 Jul 2023 00:35:27 -0500 Subject: [PATCH 31/40] Another try --- .github/workflows/test_and_build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index aa517c5fe..12972e971 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -263,7 +263,7 @@ jobs: echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" set -x # echo on - $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler cxx-compiler cmake make \ + $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler make \ pyyaml${yamlver} ${sparse} pandas${pdver} scipy${spver} numpy${npver} ${awkward} \ networkx${nxver} ${numba} ${fmm} ${psg} \ ${{ matrix.slowtask == 'pytest_bizarro' && 'black' || '' }} \ @@ -271,7 +271,7 @@ jobs: ${{ steps.sourcetype.outputs.selected == 'upstream' && 'cython' || '' }} \ ${{ steps.sourcetype.outputs.selected != 'wheel' && '"graphblas>=7.4"' || '' }} \ ${{ contains(steps.pyver.outputs.selected, 'pypy') && 'pypy' || '' }} \ - ${{ matrix.os != 'windows-latest' && 'm4' || '' }} + ${{ matrix.os == 'windows-latest' && 'cmake' || 'm4' }} - name: Build extension module run: | if [[ ${{ steps.sourcetype.outputs.selected }} == "wheel" ]]; then From 3c234f378639aa9c7e591f58ab1f61eed82a09af Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 2 Jul 2023 00:36:11 -0500 Subject: [PATCH 32/40] skip SS JIT on macos again --- graphblas/tests/test_ssjit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 84c852a9c..d46a4d453 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -21,8 +21,8 @@ if backend != "suitesparse": pytest.skip("not suitesparse backend", allow_module_level=True) -# if sys.platform == "darwin": -# pytest.skip("SuiteSparse JIT tests not yet working on macos", allow_module_level=True) +if sys.platform == "darwin": + pytest.skip("SuiteSparse JIT tests not yet working on macos", allow_module_level=True) @pytest.fixture(scope="module", autouse=True) From d3e2a4f756abdf62c9b463c64f52ce64ec006063 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 2 Jul 2023 00:46:55 -0500 Subject: [PATCH 33/40] Oops, forgot to commit this when trying clang --- graphblas/tests/test_ssjit.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index d46a4d453..6f3979cc4 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -21,8 +21,8 @@ if backend != "suitesparse": pytest.skip("not suitesparse backend", allow_module_level=True) -if sys.platform == "darwin": - pytest.skip("SuiteSparse JIT tests not yet working on macos", allow_module_level=True) +# if sys.platform == "darwin": +# pytest.skip("SuiteSparse JIT tests not yet working on macos", allow_module_level=True) @pytest.fixture(scope="module", autouse=True) @@ -57,7 +57,7 @@ def _setup_jit(): ) elif sys.platform == "darwin": # pragma: no cover # This is not yet working in CI - gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/clang" # gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/x86_64-apple-darwin13.4.0-clang" gb.ss.config["jit_c_compiler_flags"] = ( "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " From 
c9b3bebf1b5bbacfceb6d76941fde32131af5bc8 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 2 Jul 2023 01:05:51 -0500 Subject: [PATCH 34/40] Don't use `-isysroot` for SS JIT on macos --- .github/workflows/test_and_build.yml | 8 ++++++-- graphblas/tests/conftest.py | 2 ++ graphblas/tests/test_ssjit.py | 6 +++--- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 12972e971..bb139eae0 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -90,7 +90,8 @@ jobs: # This should ensure we'll have full code coverage (i.e., no chance of getting unlucky), # since we need to run all slow tests on Windows and non-Windoes OSes. matrix: - os: ["ubuntu-latest", "macos-latest", "windows-latest"] + # os: ["ubuntu-latest", "macos-latest", "windows-latest"] + os: ["macos-latest", "macos-latest"] slowtask: ["pytest_normal", "pytest_bizarro", "notebooks"] env: # Wheels on OS X come with an OpenMP that conflicts with OpenMP from conda-forge. @@ -205,12 +206,15 @@ jobs: psg="" if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["=8.0.2.1", ""]))') # XXX psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==8.0.2.1", ""]))') # XXX elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==8.0.2.1", ""]))') # XXX else psgver="" fi @@ -260,7 +264,7 @@ jobs: numba=numba${numbaver} sparse=sparse${sparsever} fi - echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psgver${psgver}" + echo "versions: np${npver} sp${spver} pd${pdver} ak${akver} nx${nxver} numba${numbaver} yaml${yamlver} sparse${sparsever} psg${psgver}" set -x # echo on $(command -v mamba || command -v conda) install packaging pytest coverage coveralls=3.3.1 pytest-randomly cffi donfig tomli c-compiler make \ diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py index ce9e6488f..89d492b45 100644 --- a/graphblas/tests/conftest.py +++ b/graphblas/tests/conftest.py @@ -48,6 +48,8 @@ def pytest_configure(config): gb.config.set(autocompute=False, mapnumpy=mapnumpy) + runslow = False # XXX + backend = "suitesparse" # XXX gb.init(backend, blocking=blocking) print( f"Running tests with {backend!r} backend, blocking={blocking}, " diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 6f3979cc4..ca8108ac9 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -62,9 +62,9 @@ def _setup_jit(): gb.ss.config["jit_c_compiler_flags"] = ( "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT " - "-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch x86_64 -isysroot " - 
"/Applications/Xcode_13.2.1.app/Contents/Developer/Platforms/MacOSX.platform" - "/Developer/SDKs/MacOSX10.9.sdk" + "-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch x86_64 " + # "-isysroot /Applications/Xcode_13.2.1.app/Contents/Developer/Platforms" + # "/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk" ) gb.ss.config["jit_c_linker_flags"] = ( "-Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs " From 86bddfc0331015247dccfea2c12fe260e97fddd5 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Sun, 2 Jul 2023 01:17:07 -0500 Subject: [PATCH 35/40] Clean up; SS JIT working on macos in CI now! --- .github/workflows/test_and_build.yml | 3 --- graphblas/tests/conftest.py | 2 -- graphblas/tests/test_ssjit.py | 10 ++-------- 3 files changed, 2 insertions(+), 13 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index bb139eae0..0c0474018 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -206,15 +206,12 @@ jobs: psg="" if [[ ${{ steps.sourcetype.outputs.selected}} == "conda-forge" ]] ; then psgver=$(python -c 'import random ; print(random.choice(["=7.4.0", "=7.4.1", "=7.4.2", "=7.4.3.0", "=7.4.3.1", "=7.4.3.2", "=8.0.2.1", ""]))') - psgver=$(python -c 'import random ; print(random.choice(["=8.0.2.1", ""]))') # XXX psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", ""]))') - psgver=$(python -c 'import random ; print(random.choice(["==8.0.2.1", ""]))') # XXX elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", ""]))') - psgver=$(python -c 'import random ; print(random.choice(["==8.0.2.1", ""]))') # XXX else psgver="" fi diff --git a/graphblas/tests/conftest.py b/graphblas/tests/conftest.py index 89d492b45..ce9e6488f 100644 --- a/graphblas/tests/conftest.py +++ b/graphblas/tests/conftest.py @@ -48,8 +48,6 @@ def pytest_configure(config): gb.config.set(autocompute=False, mapnumpy=mapnumpy) - runslow = False # XXX - backend = "suitesparse" # XXX gb.init(backend, blocking=blocking) print( f"Running tests with {backend!r} backend, blocking={blocking}, " diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index ca8108ac9..73cce93b2 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -21,8 +21,6 @@ if backend != "suitesparse": pytest.skip("not suitesparse backend", allow_module_level=True) -# if sys.platform == "darwin": -# pytest.skip("SuiteSparse JIT tests not yet working on macos", allow_module_level=True) @pytest.fixture(scope="module", autouse=True) @@ -55,16 +53,12 @@ def _setup_jit(): f"m;dl;{conda_prefix}/lib/libgomp.so;" f"{conda_prefix}/x86_64-conda-linux-gnu/sysroot/usr/lib/libpthread.so" ) - elif sys.platform == "darwin": # pragma: no cover - # This is not yet working in CI + elif sys.platform == "darwin": gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/clang" - # gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/x86_64-apple-darwin13.4.0-clang" gb.ss.config["jit_c_compiler_flags"] = ( "-march=core2 -mtune=haswell -mssse3 -ftree-vectorize -fPIC -fPIE " f"-fstack-protector-strong -O2 -pipe -isystem {conda_prefix}/include -DGBNCPUFEAT " - "-Wno-pointer-sign -O3 -DNDEBUG 
-fopenmp=libomp -fPIC -arch x86_64 " - # "-isysroot /Applications/Xcode_13.2.1.app/Contents/Developer/Platforms" - # "/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk" + "-Wno-pointer-sign -O3 -DNDEBUG -fopenmp=libomp -fPIC -arch x86_64" ) gb.ss.config["jit_c_linker_flags"] = ( "-Wl,-pie -Wl,-headerpad_max_install_names -Wl,-dead_strip_dylibs " From 546814b47f677e4f20b7ede42941edab42e938c2 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 5 Jul 2023 10:43:13 -0500 Subject: [PATCH 36/40] Clean up; change dependency metadata to only use SS:GB 7 --- .github/workflows/test_and_build.yml | 13 ++++++------- .pre-commit-config.yaml | 4 ++-- pyproject.toml | 7 +++++-- scripts/check_versions.sh | 2 +- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 0c0474018..d93b4c25c 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -85,13 +85,12 @@ jobs: shell: bash -l {0} strategy: # To "stress test" in CI, set `fail-fast` to `false` and perhaps add more items to `matrix.slowtask` - fail-fast: false # XXX TODO FIXME temporary + fail-fast: true # The build matrix is [os]x[slowtask] and then randomly chooses [pyver] and [sourcetype]. # This should ensure we'll have full code coverage (i.e., no chance of getting unlucky), # since we need to run all slow tests on Windows and non-Windoes OSes. matrix: - # os: ["ubuntu-latest", "macos-latest", "windows-latest"] - os: ["macos-latest", "macos-latest"] + os: ["ubuntu-latest", "macos-latest", "windows-latest"] slowtask: ["pytest_normal", "pytest_bizarro", "notebooks"] env: # Wheels on OS X come with an OpenMP that conflicts with OpenMP from conda-forge. @@ -176,22 +175,22 @@ jobs: npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.9') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.2", "=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') elif [[ ${{ startsWith(steps.pyver.outputs.selected, '3.10') }} == true ]]; then npver=$(python -c 'import random ; print(random.choice(["=1.21", "=1.22", "=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.8", "=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.3", "=1.4", "=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') else # Python 3.11 npver=$(python -c 'import 
random ; print(random.choice(["=1.23", "=1.24", "=1.25", ""]))') spver=$(python -c 'import random ; print(random.choice(["=1.9", "=1.10", "=1.11", ""]))') pdver=$(python -c 'import random ; print(random.choice(["=1.5", "=2.0", ""]))') - akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", ""]))') + akver=$(python -c 'import random ; print(random.choice(["=1.10", "=2.0", "=2.1", "=2.2", "=2.3", ""]))') fi if [[ ${{ steps.sourcetype.outputs.selected }} == "source" || ${{ steps.sourcetype.outputs.selected }} == "upstream" ]]; then # TODO: there are currently issues with some numpy versions when diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39567e4f1..726538e16 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: - id: black - id: black-jupyter - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.275 + rev: v0.0.277 hooks: - id: ruff args: [--fix-only, --show-fixes] @@ -94,7 +94,7 @@ repos: additional_dependencies: [tomli] files: ^(graphblas|docs)/ - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.275 + rev: v0.0.277 hooks: - id: ruff - repo: https://github.com/sphinx-contrib/sphinx-lint diff --git a/pyproject.toml b/pyproject.toml index bd72e9fc7..fdd3a7a94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -63,7 +63,7 @@ dependencies = [ "pyyaml >=5.4", # These won't be installed by default after 2024.3.0 # Use e.g. "python-graphblas[suitesparse]" or "python-graphblas[default]" instead - "suitesparse-graphblas >=7.4.0.0, <8.1", + "suitesparse-graphblas >=7.4.0.0, <7.5", "numba >=0.55; python_version<'3.12'", # make optional where numba is not supported ] @@ -75,7 +75,7 @@ changelog = "https://github.com/python-graphblas/python-graphblas/releases" [project.optional-dependencies] suitesparse = [ - "suitesparse-graphblas >=7.4.0.0, <8.1", + "suitesparse-graphblas >=7.4.0.0, <7.5", ] networkx = [ "networkx >=2.8", @@ -313,6 +313,7 @@ ignore = [ "TRY004", # Prefer `TypeError` exception for invalid type (Note: good advice, but not worth the nuisance) "TRY200", # Use `raise from` to specify exception cause (Note: sometimes okay to raise original exception) "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` (Note: no annotations yet) + "PERF401", # Use a list comprehension to create a transformed list (Note: poorly implemented atm) # Intentionally ignored "COM812", # Trailing comma missing @@ -324,6 +325,8 @@ ignore = [ "N806", # Variable ... in function should be lowercase "N807", # Function name should not start and end with `__` "N818", # Exception name ... 
should be named with an Error suffix (Note: good advice) + "PERF203", # `try`-`except` within a loop incurs performance overhead (Note: too strict) + "PLC0205", # Class `__slots__` should be a non-string iterable (Note: string is fine) "PLR0124", # Name compared with itself, consider replacing `x == x` (Note: too strict) "PLR0911", # Too many return statements "PLR0912", # Too many branches diff --git a/scripts/check_versions.sh b/scripts/check_versions.sh index b6622a56e..ef1a76135 100755 --- a/scripts/check_versions.sh +++ b/scripts/check_versions.sh @@ -7,7 +7,7 @@ conda search 'numpy[channel=conda-forge]>=1.25.0' conda search 'pandas[channel=conda-forge]>=2.0.3' conda search 'scipy[channel=conda-forge]>=1.11.1' conda search 'networkx[channel=conda-forge]>=3.1' -conda search 'awkward[channel=conda-forge]>=2.2.4' +conda search 'awkward[channel=conda-forge]>=2.3.0' conda search 'sparse[channel=conda-forge]>=0.14.0' conda search 'fast_matrix_market[channel=conda-forge]>=1.7.2' conda search 'numba[channel=conda-forge]>=0.57.1' From fdd4cadcef6473f9f274f2e10378a61827bc3ded Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 5 Jul 2023 13:03:35 -0500 Subject: [PATCH 37/40] Test python-suitesparse-graphblas 8 wheels --- .github/workflows/test_and_build.yml | 3 ++- graphblas/tests/test_ssjit.py | 9 ++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index d93b4c25c..2983f6077 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -132,7 +132,7 @@ jobs: upstream weights: | 1 - 1 + 1000000000 1 1 - name: Setup mamba @@ -208,6 +208,7 @@ jobs: psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", ""]))') + psgver=$(python -c 'import random ; print(random.choice(["==8.0.2.1", ""]))') # XXX elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", ""]))') else psgver="" fi diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index 73cce93b2..ce1b09ce5 100644 --- a/graphblas/tests/test_ssjit.py +++ b/graphblas/tests/test_ssjit.py @@ -67,7 +67,14 @@ def _setup_jit(): gb.ss.config["jit_c_libraries"] = f"-lm -ldl {conda_prefix}/lib/libomp.dylib" gb.ss.config["jit_c_cmake_libs"] = f"m;dl;{conda_prefix}/lib/libomp.dylib" elif sys.platform == "win32": # pragma: no branch (sanity) - pass + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" + gb.ss.config["jit_c_compiler_flags"] = ( '/DWIN32 /D_WINDOWS -DGBNCPUFEAT /O2 -wd"4244" -wd"4146" -wd"4018" ' '-wd"4996" -wd"4047" -wd"4554" /O2 /Ob2 /DNDEBUG -openmp' ) + gb.ss.config["jit_c_linker_flags"] = "/machine:x64" + gb.ss.config["jit_c_libraries"] = "" + gb.ss.config["jit_c_cmake_libs"] = "" @pytest.fixture From aee83f8358b01513119b094fce3d26b3f0efd88c Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 5 Jul 2023 15:51:48 -0500 Subject: [PATCH 38/40] hmm, don't test JIT on Windows with suitesparse-graphblas wheels --- graphblas/tests/test_ssjit.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/graphblas/tests/test_ssjit.py b/graphblas/tests/test_ssjit.py index ce1b09ce5..57cb2bbba 100644 --- a/graphblas/tests/test_ssjit.py +++
b/graphblas/tests/test_ssjit.py @@ -67,6 +67,12 @@ def _setup_jit(): gb.ss.config["jit_c_libraries"] = f"-lm -ldl {conda_prefix}/lib/libomp.dylib" gb.ss.config["jit_c_cmake_libs"] = f"m;dl;{conda_prefix}/lib/libomp.dylib" elif sys.platform == "win32": # pragma: no branch (sanity) + if "mingw" in gb.ss.config["jit_c_libraries"]: + # This probably means we're testing a `python-suitesparse-graphblas` wheel + # in a conda environment. This is not yet working. + gb.ss.config["jit_c_control"] = "off" + return + gb.ss.config["jit_c_compiler_name"] = f"{conda_prefix}/bin/cc" gb.ss.config["jit_c_compiler_flags"] = ( '/DWIN32 /D_WINDOWS -DGBNCPUFEAT /O2 -wd"4244" -wd"4146" -wd"4018" ' @@ -90,6 +96,8 @@ def test_jit_udt(): "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;" ) return + if gb.ss.config["jit_c_control"] == "off": + return with burble(): dtype = dtypes.ss.register_new( "myquaternion", "typedef struct { float x [4][4] ; int color ; } myquaternion ;" @@ -136,6 +144,8 @@ def test_jit_unary(v): with pytest.raises(RuntimeError, match="JIT was added"): unary.ss.register_new("square", cdef, "FP32", "FP32") return + if gb.ss.config["jit_c_control"] == "off": + return with burble(): square = unary.ss.register_new("square", cdef, "FP32", "FP32") assert not hasattr(unary, "square") @@ -158,6 +168,8 @@ def test_jit_binary(v): with pytest.raises(RuntimeError, match="JIT was added"): binary.ss.register_new("absdiff", cdef, "FP64", "FP64", "FP64") return + if gb.ss.config["jit_c_control"] == "off": + return with burble(): absdiff = binary.ss.register_new( "absdiff", @@ -192,6 +204,8 @@ def test_jit_indexunary(v): with pytest.raises(RuntimeError, match="JIT was added"): indexunary.ss.register_new("diffy", cdef, "FP64", "FP64", "FP64") return + if gb.ss.config["jit_c_control"] == "off": + return with burble(): diffy = indexunary.ss.register_new("diffy", cdef, "FP64", "FP64", "FP64") assert not hasattr(indexunary, "diffy") @@ -220,6 +234,8 @@ def test_jit_select(v): with pytest.raises(RuntimeError, match="JIT was added"): select.ss.register_new("woot", cdef, "INT32", "INT32") return + if gb.ss.config["jit_c_control"] == "off": + return with burble(): woot = select.ss.register_new("woot", cdef, "INT32", "INT32") assert not hasattr(select, "woot") From 8639b0befae822d3c45ed71a55cafec19398b576 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 5 Jul 2023 16:08:24 -0500 Subject: [PATCH 39/40] Clean up again --- .github/workflows/test_and_build.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/test_and_build.yml b/.github/workflows/test_and_build.yml index 2983f6077..d93b4c25c 100644 --- a/.github/workflows/test_and_build.yml +++ b/.github/workflows/test_and_build.yml @@ -132,7 +132,7 @@ jobs: upstream weights: | 1 - 1000000000 + 1 1 1 - name: Setup mamba @@ -208,7 +208,6 @@ jobs: psg=python-suitesparse-graphblas${psgver} elif [[ ${{ steps.sourcetype.outputs.selected}} == "wheel" ]] ; then psgver=$(python -c 'import random ; print(random.choice(["==7.4.3.2", "==8.0.2.1", ""]))') - psgver=$(python -c 'import random ; print(random.choice(["==8.0.2.1", ""]))') # XXX elif [[ ${{ steps.sourcetype.outputs.selected}} == "source" ]] ; then # These should be exact versions psgver=$(python -c 'import random ; print(random.choice(["==7.4.0.0", "==7.4.1.0", "==7.4.2.0", "==7.4.3.0", "==7.4.3.1", "==7.4.3.2", "==8.0.2.1", ""]))') From 6270abf17de72b4db30e467424945b49f7f7af97 Mon Sep 17 00:00:00 2001 From: Erik Welch Date: Wed, 5 Jul 2023 16:49:27 -0500 
Subject: [PATCH 40/40] Update ss config doc for experimental GPU support --- graphblas/ss/_core.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/graphblas/ss/_core.py b/graphblas/ss/_core.py index b4ab63318..2639a7709 100644 --- a/graphblas/ss/_core.py +++ b/graphblas/ss/_core.py @@ -126,9 +126,16 @@ class GlobalConfig(BaseConfig): burble : bool Enable diagnostic printing from SuiteSparse:GraphBLAS print_1based : bool + gpu_control : str, {"always", "never"} + Only available for SuiteSparse:GraphBLAS 7 + **GPU support is a work in progress--not recommended to use** + gpu_chunk : double + Only available for SuiteSparse:GraphBLAS 7 + **GPU support is a work in progress--not recommended to use** gpu_id : int Which GPU to use; default is -1, which means do not run on the GPU. - **GPU support is a work in progress--do not use** + Only available for SuiteSparse:GraphBLAS 8 + **GPU support is a work in progress--not recommended to use** Setting values to None restores the default value for most configurations. """
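
Editor's note on the GlobalConfig options documented in the patch above: a minimal sketch (not part of the patch series) of how these options are typically read and written through the mapping-like gb.ss.config object. It assumes an environment where the suitesparse backend is available, and it guards the GPU-related keys because which of them exist depends on the SuiteSparse:GraphBLAS version in use.

import graphblas as gb

gb.init("suitesparse")  # GlobalConfig applies to the suitesparse backend
cfg = gb.ss.config      # behaves like a mutable mapping of option name -> value

cfg["burble"] = True    # enable diagnostic printing from SuiteSparse:GraphBLAS
if "gpu_id" in cfg:     # SuiteSparse:GraphBLAS 8; default -1 means "do not run on the GPU"
    print("gpu_id:", cfg["gpu_id"])
elif "gpu_control" in cfg:  # SuiteSparse:GraphBLAS 7 exposes gpu_control/gpu_chunk instead
    print("gpu_control:", cfg["gpu_control"])
cfg["burble"] = False   # turn diagnostics back off (assigning None restores the default for most options)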