diff --git a/IPython/__init__.py b/IPython/__init__.py index b7235481f21..fbfa04abad6 100644 --- a/IPython/__init__.py +++ b/IPython/__init__.py @@ -87,7 +87,7 @@ def embed_kernel(module=None, local_ns=None, **kwargs): **kwargs : various, optional Further keyword args are relayed to the IPKernelApp constructor, such as `config`, a traitlets :class:`Config` object (see :ref:`configure_start_ipython`), - allowing configuration of the kernel (see :ref:`kernel_options`). Will only have an effect + allowing configuration of the kernel. Will only have an effect on the first embed_kernel call for a given process. """ @@ -95,7 +95,7 @@ def embed_kernel(module=None, local_ns=None, **kwargs): if module is None: module = caller_module if local_ns is None: - local_ns = caller_locals + local_ns = dict(**caller_locals) # Only import .zmq when we really need it from ipykernel.embed import embed_kernel as real_embed_kernel @@ -150,7 +150,7 @@ def start_kernel(argv=None, **kwargs): **kwargs : various, optional Any other kwargs will be passed to the Application constructor, such as `config`, a traitlets :class:`Config` object (see :ref:`configure_start_ipython`), - allowing configuration of the kernel (see :ref:`kernel_options`). + allowing configuration of the kernel. """ import warnings diff --git a/IPython/core/completer.py b/IPython/core/completer.py index b39a9228103..93416edfd75 100644 --- a/IPython/core/completer.py +++ b/IPython/core/completer.py @@ -1145,16 +1145,61 @@ def attr_matches(self, text): # we simple attribute matching with normal identifiers. _ATTR_MATCH_RE = re.compile(r"(.+)\.(\w*)$") + def _strip_code_before_operator(self, code: str) -> str: + o_parens = {"(", "[", "{"} + c_parens = {")", "]", "}"} + + # Dry-run tokenize to catch errors + try: + _ = list(tokenize.generate_tokens(iter(code.splitlines()).__next__)) + except tokenize.TokenError: + # Try trimming the expression and retrying + trimmed_code = self._trim_expr(code) + try: + _ = list( + tokenize.generate_tokens(iter(trimmed_code.splitlines()).__next__) + ) + code = trimmed_code + except tokenize.TokenError: + return code + + tokens = _parse_tokens(code) + encountered_operator = False + after_operator = [] + nesting_level = 0 + + for t in tokens: + if t.type == tokenize.OP: + if t.string in o_parens: + nesting_level += 1 + elif t.string in c_parens: + nesting_level -= 1 + elif t.string != "." and nesting_level == 0: + encountered_operator = True + after_operator = [] + continue + + if encountered_operator: + after_operator.append(t.string) + + if encountered_operator: + return "".join(after_operator) + else: + return code + def _attr_matches( self, text: str, include_prefix: bool = True ) -> Tuple[Sequence[str], str]: - m2 = self._ATTR_MATCH_RE.match(self.line_buffer) + m2 = self._ATTR_MATCH_RE.match(text) if not m2: return [], "" expr, attr = m2.group(1, 2) + try: + expr = self._strip_code_before_operator(expr) + except tokenize.TokenError: + pass obj = self._evaluate_expr(expr) - if obj is not_found: return [], "" @@ -2356,11 +2401,163 @@ def _jedi_matches( else: return iter([]) + class _CompletionContextType(enum.Enum): + ATTRIBUTE = "attribute" # For attribute completion + GLOBAL = "global" # For global completion + + def _determine_completion_context(self, line): + """ + Determine whether the cursor is in an attribute or global completion context. + """ + # Cursor in string/comment → GLOBAL. 
+ is_string, is_in_expression = self._is_in_string_or_comment(line) + if is_string and not is_in_expression: + return self._CompletionContextType.GLOBAL + + # If we're in a template string expression, handle specially + if is_string and is_in_expression: + # Extract the expression part - look for the last { that isn't closed + expr_start = line.rfind("{") + if expr_start >= 0: + # We're looking at the expression inside a template string + expr = line[expr_start + 1 :] + # Recursively determine the context of the expression + return self._determine_completion_context(expr) + + # Handle plain number literals - should be global context + # Ex: 3. -42.14 but not 3.1. + if re.search(r"(? 0, + ) + @context_matcher() def python_matcher(self, context: CompletionContext) -> SimpleMatcherResult: """Match attributes or global python names""" - text = context.line_with_cursor - if "." in text: + text = context.text_until_cursor + completion_type = self._determine_completion_context(text) + if completion_type == self._CompletionContextType.ATTRIBUTE: try: matches, fragment = self._attr_matches(text, include_prefix=False) if text.endswith(".") and self.omit__names: diff --git a/IPython/core/debugger.py b/IPython/core/debugger.py index 76c42e02309..1c4ea069b0a 100644 --- a/IPython/core/debugger.py +++ b/IPython/core/debugger.py @@ -248,13 +248,7 @@ def __init__(self, completekey=None, stdin=None, stdout=None, context=5, **kwarg docs for more info. """ - # Parent constructor: - try: - self.context = int(context) - if self.context <= 0: - raise ValueError("Context must be a positive integer") - except (TypeError, ValueError) as e: - raise ValueError("Context must be a positive integer") from e + self.context = int(context) # `kwargs` ensures full compatibility with stdlib's `pdb.Pdb`. OldPdb.__init__(self, completekey, stdin, stdout, **kwargs) @@ -490,14 +484,9 @@ def print_stack_trace(self, context=None): ColorsNormal = Colors.Normal if context is None: context = self.context - try: - context = int(context) - if context <= 0: - raise ValueError("Context must be a positive integer") - except (TypeError, ValueError) as e: - raise ValueError("Context must be a positive integer") from e try: skipped = 0 + to_print = "" for hidden, frame_lineno in zip(self.hidden_frames(self.stack), self.stack): if hidden and self.skip_hidden: skipped += 1 @@ -507,31 +496,21 @@ def print_stack_trace(self, context=None): f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n" ) skipped = 0 - self.print_stack_entry(frame_lineno, context=context) + to_print += self.format_stack_entry(frame_lineno) if skipped: - print( + to_print += ( f"{Colors.excName} [... 
skipping {skipped} hidden frame(s)]{ColorsNormal}\n" ) + print(to_print, file=self.stdout) except KeyboardInterrupt: pass - def print_stack_entry(self, frame_lineno, prompt_prefix='\n-> ', - context=None): - if context is None: - context = self.context - try: - context = int(context) - if context <= 0: - raise ValueError("Context must be a positive integer") - except (TypeError, ValueError) as e: - raise ValueError("Context must be a positive integer") from e - print(self.format_stack_entry(frame_lineno, '', context), file=self.stdout) + def print_stack_entry(self, frame_lineno, prompt_prefix='\n-> '): + print(self.format_stack_entry(frame_lineno, ''), file=self.stdout) - # vds: >> frame, lineno = frame_lineno filename = frame.f_code.co_filename self.shell.hooks.synchronize_with_editor(filename, lineno, 0) - # vds: << def _get_frame_locals(self, frame): """ " @@ -555,15 +534,14 @@ def _get_frame_locals(self, frame): else: return frame.f_locals - def format_stack_entry(self, frame_lineno, lprefix=': ', context=None): - if context is None: - context = self.context + def format_stack_entry(self, frame_lineno, lprefix=': '): + context = self.context try: context = int(context) if context <= 0: print("Context must be a positive integer", file=self.stdout) except (TypeError, ValueError): - print("Context must be a positive integer", file=self.stdout) + print("Context must be a positive integer", file=self.stdout) import reprlib diff --git a/IPython/core/inputtransformer.py b/IPython/core/inputtransformer.py index bb1061e8dce..5229be43228 100644 --- a/IPython/core/inputtransformer.py +++ b/IPython/core/inputtransformer.py @@ -9,6 +9,7 @@ import functools import re import tokenize +import warnings from tokenize import untokenize, TokenError from io import StringIO @@ -42,7 +43,16 @@ class InputTransformer(metaclass=abc.ABCMeta): """Abstract base class for line-based input transformers.""" - + + def __init__(self): + warnings.warn( + "`InputTransformer` has been deprecated since IPython 7.0" + " and emits a warning since IPython 8.31; it" + " will be removed in the future", + DeprecationWarning, + stacklevel=2, + ) + @abc.abstractmethod def push(self, line): """Send a line of input to the transformer, returning the transformed @@ -78,6 +88,14 @@ def transformer_factory(**kwargs): class StatelessInputTransformer(InputTransformer): """Wrapper for a stateless input transformer implemented as a function.""" def __init__(self, func): + super().__init__() + warnings.warn( + "`StatelessInputTransformer` has been deprecated since IPython 7.0" + " and emits a warning since IPython 8.31; it" + " will be removed in the future", + DeprecationWarning, + stacklevel=2, + ) self.func = func def __repr__(self): @@ -96,6 +114,14 @@ class CoroutineInputTransformer(InputTransformer): """Wrapper for an input transformer implemented as a coroutine.""" def __init__(self, coro, **kwargs): # Prime it + super().__init__() + warnings.warn( + "`CoroutineInputTransformer` has been deprecated since IPython 7.0" + " and emits a warning since IPython 8.31; it" + " will be removed in the future", + DeprecationWarning, + stacklevel=2, + ) self.coro = coro(**kwargs) next(self.coro) @@ -122,6 +148,13 @@ class TokenInputTransformer(InputTransformer): return an iterable which can be passed to tokenize.untokenize(). """ def __init__(self, func): + warnings.warn( + "`TokenInputTransformer` has been deprecated since IPython 7.0" + " and emits a warning since IPython 8.31; it" + " will be removed in the future", + DeprecationWarning, + stacklevel=2, + ) self.func = func self.buf = [] self.reset_tokenizer() @@ -167,7 +200,7 @@ def reset(self): class assemble_python_lines(TokenInputTransformer): def __init__(self): - super(assemble_python_lines, self).__init__(None) + super().__init__(None) def output(self, tokens): return self.reset() diff --git a/IPython/core/interactiveshell.py b/IPython/core/interactiveshell.py index 07fb8077601..a341ab053a3 100644 --- a/IPython/core/interactiveshell.py +++ b/IPython/core/interactiveshell.py @@ -900,7 +900,7 @@ def init_virtualenv(self): return p = Path(sys.executable) - p_venv = Path(os.environ["VIRTUAL_ENV"]) + p_venv = Path(os.environ["VIRTUAL_ENV"]).resolve() # fallback venv detection: # stdlib venv may symlink sys.executable, so we can't use realpath. @@ -913,7 +913,7 @@ def init_virtualenv(self): drive_name = p_venv.parts[2] p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:]) - if any(p_venv == p.parents[1] for p in paths): + if any(p_venv == p.parents[1].resolve() for p in paths): # Our exe is inside or has access to the virtualenv, don't need to do anything. return @@ -2093,6 +2093,8 @@ def _get_exc_info(self, exc_tuple=None): sys.last_type = etype sys.last_value = value sys.last_traceback = tb + if sys.version_info >= (3, 12): + sys.last_exc = value return etype, value, tb diff --git a/IPython/core/magics/execution.py b/IPython/core/magics/execution.py index 3aa0a27fc27..92453dae84f 100644 --- a/IPython/core/magics/execution.py +++ b/IPython/core/magics/execution.py @@ -151,10 +151,11 @@ def visit_For(self, node): class Timer(timeit.Timer): """Timer class that explicitly uses self.inner - + which is an undocumented implementation detail of CPython, not shared by PyPy. """ + # Timer.timeit copied from CPython 3.4.2 def timeit(self, number=timeit.default_number): """Time 'number' executions of the main statement. @@ -192,7 +193,6 @@ def __init__(self, shell): @no_var_expand @line_cell_magic def prun(self, parameter_s='', cell=None): - """Run a statement through the python code profiler. **Usage, in line mode:** @@ -977,9 +977,23 @@ def _run_with_debugger( break finally: sys.settrace(trace) - - except: + # Perform proper cleanup of the session in case + # it exited with "continue" and not "quit" command + if hasattr(deb, "rcLines"): + # Run this code defensively in case a custom debugger + # class does not implement rcLines, which although public + # is an implementation detail of `pdb.Pdb` and not part of + # the more generic basic debugger framework (`bdb.Bdb`). 
+ deb.set_quit() + deb.rcLines.extend(["q"]) + try: + deb.run("", code_ns, local_ns) + except StopIteration: + # Stop iteration is raised on quit command + pass + + except Exception: etype, value, tb = sys.exc_info() # Skip three frames in the traceback: the %run one, # one inside bdb.py, and the command-line typed by the @@ -1113,7 +1127,7 @@ def timeit(self, line='', cell=None, local_ns=None): ) if stmt == "" and cell is None: return - + timefunc = timeit.default_timer number = int(getattr(opts, "n", 0)) default_repeat = 7 if timeit.default_repeat < 7 else timeit.default_repeat @@ -1228,7 +1242,7 @@ def timeit(self, line='', cell=None, local_ns=None): @needs_local_scope @line_cell_magic @output_can_be_silenced - def time(self,line='', cell=None, local_ns=None): + def time(self, line="", cell=None, local_ns=None): """Time execution of a Python statement or expression. The CPU and wall clock times are printed, and the value of the @@ -1243,13 +1257,19 @@ def time(self,line='', cell=None, local_ns=None): - In cell mode, you can time the cell body (a directly following statement raises an error). - This function provides very basic timing functionality. Use the timeit + This function provides very basic timing functionality. Use the timeit magic for more control over the measurement. .. versionchanged:: 7.3 User variables are no longer expanded, the magic line is always left unmodified. + .. versionchanged:: 8.3 + The time magic now correctly propagates system-exiting exceptions + (such as ``KeyboardInterrupt`` invoked when interrupting execution) + rather than just printing out the exception traceback. + The non-system-exception will still be caught as before. + Examples -------- :: @@ -1290,10 +1310,10 @@ def time(self,line='', cell=None, local_ns=None): Compiler : 0.78 s """ # fail immediately if the given expression can't be compiled - + if line and cell: raise UsageError("Can't use statement directly after '%%time'!") - + if cell: expr = self.shell.transform_cell(cell) else: @@ -1304,7 +1324,7 @@ def time(self,line='', cell=None, local_ns=None): t0 = clock() expr_ast = self.shell.compile.ast_parse(expr) - tp = clock()-t0 + tp = clock() - t0 # Apply AST transformations expr_ast = self.shell.transform_ast(expr_ast) @@ -1312,8 +1332,8 @@ def time(self,line='', cell=None, local_ns=None): # Minimum time above which compilation time will be reported tc_min = 0.1 - expr_val=None - if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr): + expr_val = None + if len(expr_ast.body) == 1 and isinstance(expr_ast.body[0], ast.Expr): mode = 'eval' source = '' expr_ast = ast.Expression(expr_ast.body[0].value) @@ -1322,25 +1342,25 @@ def time(self,line='', cell=None, local_ns=None): source = '' # multi-line %%time case if len(expr_ast.body) > 1 and isinstance(expr_ast.body[-1], ast.Expr): - expr_val= expr_ast.body[-1] + expr_val = expr_ast.body[-1] expr_ast = expr_ast.body[:-1] expr_ast = Module(expr_ast, []) expr_val = ast.Expression(expr_val.value) t0 = clock() code = self.shell.compile(expr_ast, source, mode) - tc = clock()-t0 + tc = clock() - t0 # skew measurement as little as possible glob = self.shell.user_ns wtime = time.time # time execution wall_st = wtime() - if mode=='eval': + if mode == "eval": st = clock2() try: out = eval(code, glob, local_ns) - except: + except Exception: self.shell.showtraceback() return end = clock2() @@ -1348,12 +1368,12 @@ def time(self,line='', cell=None, local_ns=None): st = clock2() try: exec(code, glob, local_ns) - out=None + out = None # multi-line 
%%time case if expr_val is not None: code_2 = self.shell.compile(expr_val, source, 'eval') out = eval(code_2, glob, local_ns) - except: + except Exception: self.shell.showtraceback() return end = clock2() @@ -1583,14 +1603,15 @@ def parse_breakpoint(text, current_file): return current_file, int(text) else: return text[:colon], int(text[colon+1:]) - + + def _format_time(timespan, precision=3): """Formats the timespan in a human readable form""" if timespan >= 60.0: # we have more than a minute, format that in a human readable form # Idea from http://snipplr.com/view/5713/ - parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)] + parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)] time = [] leftover = timespan for suffix, length in parts: @@ -1602,7 +1623,6 @@ def _format_time(timespan, precision=3): break return " ".join(time) - # Unfortunately characters outside of range(128) can cause problems in # certain terminals. # See bug: https://bugs.launchpad.net/ipython/+bug/348466 @@ -1616,7 +1636,7 @@ def _format_time(timespan, precision=3): except: pass scaling = [1, 1e3, 1e6, 1e9] - + if timespan > 0.0: order = min(-int(math.floor(math.log10(timespan)) // 3), 3) else: diff --git a/IPython/core/magics/script.py b/IPython/core/magics/script.py index 0c405ef420f..3bfc4d8d671 100644 --- a/IPython/core/magics/script.py +++ b/IPython/core/magics/script.py @@ -67,6 +67,10 @@ def script_args(f): return f +class RaiseAfterInterrupt(Exception): + pass + + @magics_class class ScriptMagics(Magics): """Magics for talking to scripts @@ -176,6 +180,10 @@ def shebang(self, line, cell): The rest of the cell is run by that program. + .. versionchanged:: 9.0 + Interrupting the script executed without `--bg` will end in + raising an exception (unless `--no-raise-error` is passed). + Examples -------- :: @@ -212,7 +220,7 @@ def in_thread(coro): async def _readchunk(stream): try: - return await stream.readuntil(b"\n") + return await stream.read(100) except asyncio.exceptions.IncompleteReadError as e: return e.partial except asyncio.exceptions.LimitOverrunError as e: @@ -292,20 +300,33 @@ async def _stream_communicate(process, cell): p.send_signal(signal.SIGINT) in_thread(asyncio.wait_for(p.wait(), timeout=0.1)) if p.returncode is not None: - print("Process is interrupted.") - return + print("Process was interrupted.") + if args.raise_error: + raise RaiseAfterInterrupt() + else: + return p.terminate() in_thread(asyncio.wait_for(p.wait(), timeout=0.1)) if p.returncode is not None: - print("Process is terminated.") - return + print("Process was terminated.") + if args.raise_error: + raise RaiseAfterInterrupt() + else: + return p.kill() - print("Process is killed.") + print("Process was killed.") + if args.raise_error: + raise RaiseAfterInterrupt() + except RaiseAfterInterrupt: + pass except OSError: pass except Exception as e: print("Error while terminating subprocess (pid=%i): %s" % (p.pid, e)) - return + if args.raise_error: + raise CalledProcessError(p.returncode, cell) from None + else: + return if args.raise_error and p.returncode != 0: # If we get here and p.returncode is still None, we must have diff --git a/IPython/core/release.py b/IPython/core/release.py index af01e755e14..70cd8b5f6a8 100644 --- a/IPython/core/release.py +++ b/IPython/core/release.py @@ -16,7 +16,7 @@ # release. 
'dev' as a _version_extra string means this is a development # version _version_major = 8 -_version_minor = 31 +_version_minor = 39 _version_patch = 0 _version_extra = ".dev" # _version_extra = "rc1" diff --git a/IPython/core/tests/test_completer.py b/IPython/core/tests/test_completer.py index a65d5d9a606..b4faf5247f6 100644 --- a/IPython/core/tests/test_completer.py +++ b/IPython/core/tests/test_completer.py @@ -595,7 +595,9 @@ def test_greedy_completions(self): """ ip = get_ipython() ip.ex("a=list(range(5))") + ip.ex("b,c = 1, 1.2") ip.ex("d = {'a b': str}") + ip.ex("x=y='a'") _, c = ip.complete(".", line="a[0].") self.assertFalse(".real" in c, "Shouldn't have completed on a[0]: %s" % c) @@ -625,7 +627,6 @@ def _(line, cursor_pos, expect, message, completion): "Should have completed on a[0].r: %s", Completion(5, 6, "real"), ) - _( "a[0].from_", 10, @@ -654,6 +655,41 @@ def _(line, cursor_pos, expect, message, completion): "Should have completed on `a.app`: %s", Completion(2, 4, "append"), ) + _( + "x.upper() == y.", + 15, + ".upper", + "Should have completed on `x.upper() == y.`: %s", + Completion(15, 15, "upper"), + ) + _( + "(x.upper() == y.", + 16, + ".upper", + "Should have completed on `(x.upper() == y.`: %s", + Completion(16, 16, "upper"), + ) + _( + "(x.upper() == y).", + 17, + ".bit_length", + "Should have completed on `(x.upper() == y).`: %s", + Completion(17, 17, "bit_length"), + ) + _( + "{'==', 'abc'}.", + 14, + ".add", + "Should have completed on `{'==', 'abc'}.`: %s", + Completion(14, 14, "add"), + ) + _( + "b + c.", + 6, + ".hex", + "Should have completed on `b + c.`: %s", + Completion(6, 6, "hex"), + ) def test_omit__names(self): # also happens to test IPCompleter as a configurable @@ -745,6 +781,46 @@ class A: words = completer.get__all__entries(A()) self.assertEqual(words, []) + def test_completes_globals_as_args_of_methods(self): + ip = get_ipython() + c = ip.Completer + c.use_jedi = False + ip.ex("long_variable_name = 1") + ip.ex("a = []") + s, matches = c.complete(None, "a.sort(lo") + self.assertIn("long_variable_name", matches) + + def test_completes_attributes_in_fstring_expressions(self): + ip = get_ipython() + c = ip.Completer + c.use_jedi = False + + class CustomClass: + def method_one(self): + pass + + ip.user_ns["custom_obj"] = CustomClass() + + # Test completion inside f-string expressions + s, matches = c.complete(None, "f'{custom_obj.meth") + self.assertIn(".method_one", matches) + + def test_completes_in_dict_expressions(self): + ip = get_ipython() + c = ip.Completer + c.use_jedi = False + ip.ex("class Test: pass") + ip.ex("test_obj = Test()") + ip.ex("test_obj.attribute = 'value'") + + # Test completion in dictionary expressions + s, matches = c.complete(None, "d = {'key': test_obj.attr") + self.assertIn(".attribute", matches) + + # Test global completion in dictionary expressions with dots + s, matches = c.complete(None, "d = {'k.e.y': Te") + self.assertIn("Test", matches) + def test_func_kw_completions(self): ip = get_ipython() c = ip.Completer @@ -1745,6 +1821,85 @@ def _(expected): _(["completion_a"]) +@pytest.mark.parametrize( + "line,expected", + [ + # Basic test cases + ("np.", "attribute"), + ("np.ran", "attribute"), + ("np.random.rand(np.random.ran", "attribute"), + ("np.random.rand(n", "global"), + ("d['k.e.y.'](ran", "global"), + ("d[0].k", "attribute"), + ("a = { 'a': np.ran", "attribute"), + ("n", "global"), + ("", "global"), + # Dots in string literals + ('some_var = "this is a string with a dot.', "global"), + ("text = 'another string with 
a dot.", "global"), + ('f"greeting {user.na', "attribute"), # Cursor in f-string expression + ('t"welcome {guest.na', "attribute"), # Cursor in t-string expression + ('f"hello {name} worl', "global"), # Cursor in f-string outside expression + ('f"hello {{a.', "global"), + ('f"hello {{{a.', "attribute"), + # Backslash escapes in strings + ('var = "string with \\"escaped quote and a dot.', "global"), + ("escaped = 'single \\'quote\\' with a dot.", "global"), + # Multi-line strings + ('multi = """This is line one\nwith a dot.', "global"), + ("multi_single = '''Another\nmulti-line\nwith a dot.", "global"), + # Inline comments + ("x = 5 # This is a comment", "global"), + ("y = obj.method() # Comment after dot.method", "global"), + # Hash symbol within string literals should not be treated as comments + ("d['#'] = np.", "attribute"), + # Nested parentheses with dots + ("complex_expr = (func((obj.method(param.attr", "attribute"), + ("multiple_nesting = {key: [value.attr", "attribute"), + # Numbers + ("3.", "global"), + ("3.14", "global"), + ("-42.14", "global"), + ("x = func(3.14", "global"), + ("x = func(a3.", "attribute"), + ("x = func(a3.12", "global"), + ("3.1.", "attribute"), + ("-3.1.", "attribute"), + ("(3).", "attribute"), + # Additional cases + ("", "global"), + ('str_with_code = "x.attr', "global"), + ('f"formatted {obj.attr', "attribute"), + ('f"formatted {obj.attr}', "global"), + ("dict_with_dots = {'key.with.dots': value.attr", "attribute"), + ("d[f'{a}']['{a.", "global"), + ], +) +def test_completion_context(line, expected): + """Test completion context""" + ip = get_ipython() + get_context = ip.Completer._determine_completion_context + result = get_context(line) + assert result.value == expected, f"Failed on input: '{line}'" + + +@pytest.mark.xfail(reason="Completion context not yet supported") +@pytest.mark.parametrize( + "line, expected", + [ + ("f'{f'a.", "global"), # Nested f-string + ("3a.", "global"), # names starting with numbers or other symbols + ("$).", "global"), # random things with dot at end + ], +) +def test_unsupported_completion_context(line, expected): + """Test unsupported completion context""" + ip = get_ipython() + get_context = ip.Completer._determine_completion_context + result = get_context(line) + assert result.value == expected, f"Failed on input: '{line}'" + + @pytest.mark.parametrize( "setup,code,expected,not_expected", [ diff --git a/IPython/core/tests/test_magic.py b/IPython/core/tests/test_magic.py index 036f250d341..8786c849056 100644 --- a/IPython/core/tests/test_magic.py +++ b/IPython/core/tests/test_magic.py @@ -6,11 +6,15 @@ import os import re import shlex +import signal import sys import warnings from importlib import invalidate_caches from io import StringIO from pathlib import Path +from time import sleep +from threading import Thread +from subprocess import CalledProcessError from textwrap import dedent from unittest import TestCase, mock @@ -808,6 +812,16 @@ def test_timeit_invalid_return(): with pytest.raises(SyntaxError): _ip.run_line_magic('timeit', 'return') +def test_timeit_raise_on_interrupt(): + ip = get_ipython() + + with pytest.raises(KeyboardInterrupt): + thread = Thread(target=_interrupt_after_1s) + thread.start() + ip.run_cell_magic("timeit", "", "from time import sleep; sleep(2)") + thread.join() + + @dec.skipif(execution.profile is None) def test_prun_special_syntax(): "Test %%prun with IPython special syntax" @@ -1121,6 +1135,36 @@ def test_script_config(): assert "whoda" in sm.magics["cell"] +def _interrupt_after_1s(): + 
sleep(1) + signal.raise_signal(signal.SIGINT) + + +def test_script_raise_on_interrupt(): + ip = get_ipython() + + with pytest.raises(CalledProcessError): + thread = Thread(target=_interrupt_after_1s) + thread.start() + ip.run_cell_magic( + "script", f"{sys.executable}", "from time import sleep; sleep(2)" + ) + thread.join() + + +def test_script_do_not_raise_on_interrupt(): + ip = get_ipython() + + thread = Thread(target=_interrupt_after_1s) + thread.start() + ip.run_cell_magic( + "script", + f"--no-raise-error {sys.executable}", + "from time import sleep; sleep(2)", + ) + thread.join() + + def test_script_out(): ip = get_ipython() ip.run_cell_magic("script", f"--out output {sys.executable}", "print('hi')") @@ -1225,6 +1269,37 @@ def test_script_defaults(): assert cmd in ip.magics_manager.magics["cell"] +async def test_script_streams_continuously(capsys): + ip = get_ipython() + # Windows is slow to start up a thread on CI + is_windows = os.name == "nt" + step = 3 if is_windows else 1 + code = dedent( + f"""\ + import time + for _ in range(3): + time.sleep({step}) + print(".", flush=True, end="") + """ + ) + + def print_numbers(): + for i in range(3): + sleep(step) + print(i, flush=True, end="") + + thread = Thread(target=print_numbers) + thread.start() + sleep(step / 2) + ip.run_cell_magic("script", f"{sys.executable}", code) + thread.join() + + captured = capsys.readouterr() + # If the streaming was line-wise or broken + # we would get `012...` + assert captured.out == "0.1.2." + + @magics_class class FooFoo(Magics): """class with both %foo and %%foo magics""" @@ -1475,6 +1550,16 @@ def test_timeit_arguments(): _ip.run_line_magic("timeit", "-n1 -r1 a=('#')") +def test_time_raise_on_interrupt(): + ip = get_ipython() + + with pytest.raises(KeyboardInterrupt): + thread = Thread(target=_interrupt_after_1s) + thread.start() + ip.run_cell_magic("time", "", "from time import sleep; sleep(2)") + thread.join() + + MINIMAL_LAZY_MAGIC = """ from IPython.core.magic import ( Magics, diff --git a/IPython/terminal/interactiveshell.py b/IPython/terminal/interactiveshell.py index ef4f5cd3f65..e95204fe058 100644 --- a/IPython/terminal/interactiveshell.py +++ b/IPython/terminal/interactiveshell.py @@ -26,7 +26,10 @@ Any, validate, Float, + DottedObjectName, ) +from traitlets.utils.importstring import import_item + from prompt_toolkit.auto_suggest import AutoSuggestFromHistory from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode @@ -214,7 +217,9 @@ class TerminalInteractiveShell(InteractiveShell): pt_app: UnionType[PromptSession, None] = None auto_suggest: UnionType[ - AutoSuggestFromHistory, NavigableAutoSuggestFromHistory, None + AutoSuggestFromHistory, + NavigableAutoSuggestFromHistory, + None, ] = None debugger_history = None @@ -420,8 +425,71 @@ def _displayhook_class_default(self): "Default is `'NavigableAutoSuggestFromHistory`'.", allow_none=True, ).tag(config=True) + _autosuggestions_provider: Any + + llm_constructor_kwargs = Dict( + {}, + help=""" + Extra arguments to pass to the `llm_provider_class` constructor. + + This is used to – for example – set the `model_id`""", + ).tag(config=True) + + llm_prefix_from_history = DottedObjectName( + "input_history", + help="""\ + Fully qualified name of a function that takes an IPython history manager and + returns a prefix to pass to the llm provider in addition to the current buffer + text. + + You can use: + + - no_prefix + - input_history + + As default value. `input_history` (default) will use all the input history of the current IPython session - def _set_autosuggestions(self, provider): + """, + ).tag(config=True) + _llm_prefix_from_history: Any + + @observe("llm_prefix_from_history") + def _llm_prefix_from_history_changed(self, change): + name = change.new + self._llm_prefix_from_history = name + self._set_autosuggestions() + + llm_provider_class = DottedObjectName( + None, + allow_none=True, + help="""\ + Provisional: + This is a provisional API in IPython 8.32; before stabilisation + in 9.0, it may change without warnings. + + Class to use for the `NavigableAutoSuggestFromHistory` to request + completions from an LLM; this should inherit from + `jupyter_ai_magics:BaseProvider` and implement + `stream_inline_completions` + """, + ).tag(config=True) + _llm_provider_class: Any = None + + @observe("llm_provider_class") + def _llm_provider_class_changed(self, change): + provider_class = change.new + if provider_class is not None: + warn( + "TerminalInteractiveShell.llm_provider_class is a provisional" + " API as of IPython 8.32, and may change without warnings." + ) + self._llm_provider_class = provider_class + self._set_autosuggestions() + + def _set_autosuggestions(self, provider=None): + if provider is None: + provider = self.autosuggestions_provider # disconnect old handler if self.auto_suggest and isinstance( self.auto_suggest, NavigableAutoSuggestFromHistory @@ -432,7 +500,38 @@ def _set_autosuggestions(self, provider): elif provider == "AutoSuggestFromHistory": self.auto_suggest = AutoSuggestFromHistory() elif provider == "NavigableAutoSuggestFromHistory": + # The LLM parts are all provisional in 8.32 + if self._llm_provider_class: + + def init_llm_provider(): + llm_provider_constructor = import_item(self._llm_provider_class) + return llm_provider_constructor(**self.llm_constructor_kwargs) + + else: + init_llm_provider = None self.auto_suggest = NavigableAutoSuggestFromHistory() + # Provisional in 8.32 + self.auto_suggest._init_llm_provider = init_llm_provider + + name = self.llm_prefix_from_history + + if name == "no_prefix": + + def no_prefix(history_manager): + return "" + + fun = no_prefix + + elif name == "input_history": + + def input_history(history_manager): + return "\n".join([s[2] for s in history_manager.get_range()]) + "\n" + + fun = input_history + + else: + fun = import_item(name) + self.auto_suggest._llm_prefixer = fun else: raise ValueError("No valid provider.") if self.pt_app: @@ -464,23 +563,23 @@ def _autosuggestions_provider_changed(self, change): "create": Bool(False), }, ), - help="""Add, disable or modifying shortcuts. + help=""" + Add, disable or modify shortcuts. Each entry on the list should be a dictionary with ``command`` key identifying the target function executed by the shortcut and at least one of the following: - - ``match_keys``: list of keys used to match an existing shortcut, - - ``match_filter``: shortcut filter used to match an existing shortcut, - - ``new_keys``: list of keys to set, - - ``new_filter``: a new shortcut filter to set + - ``match_keys``: list of keys used to match an existing shortcut, + - ``match_filter``: shortcut filter used to match an existing shortcut, + - ``new_keys``: list of keys to set, + - ``new_filter``: a new shortcut filter to set The filters have to be composed of pre-defined verbs and joined by one of the following conjunctions: ``&`` (and), ``|`` (or), ``~`` (not). The pre-defined verbs are: - {} - + {filters} To disable a shortcut set ``new_keys`` to an empty list. 
To add a shortcut add key ``create`` with value ``True``. @@ -495,8 +594,27 @@ def _autosuggestions_provider_changed(self, change): shortcuts) can be modified or disabled. The full list of shortcuts, command identifiers and filters is available under :ref:`terminal-shortcuts-list`. + + Here is an example: + + .. code:: + + c.TerminalInteractiveShell.shortcuts = [ + {{ + "new_keys": ["c-q"], + "command": "prompt_toolkit:named_commands.capitalize_word", + "create": True, + }}, + {{ + "new_keys": ["c-j"], + "command": "prompt_toolkit:named_commands.beginning_of_line", + "create": True, + }}, + ] + + """.format( - "\n ".join([f"- `{k}`" for k in KEYBINDING_FILTERS]) + filters="\n ".join([f" - ``{k}``" for k in KEYBINDING_FILTERS]) ), ).tag(config=True) @@ -815,7 +933,8 @@ def get_message(): & ~IsDone() & Condition( lambda: isinstance( - self.auto_suggest, NavigableAutoSuggestFromHistory + self.auto_suggest, + NavigableAutoSuggestFromHistory, ) ), ), diff --git a/IPython/terminal/pt_inputhooks/qt.py b/IPython/terminal/pt_inputhooks/qt.py index 2f3f491ef9e..49629cb88fd 100644 --- a/IPython/terminal/pt_inputhooks/qt.py +++ b/IPython/terminal/pt_inputhooks/qt.py @@ -3,9 +3,11 @@ from IPython.external.qt_for_kernel import QtCore, QtGui, enum_helper from IPython import get_ipython -# If we create a QApplication, keep a reference to it so that it doesn't get -# garbage collected. +# If we create a QApplication, QEventLoop, or a QTimer, keep a reference to them +# so that they don't get garbage collected or leak memory when created multiple times. _appref = None +_eventloop = None +_timer = None _already_warned = False @@ -21,7 +23,7 @@ def _reclaim_excepthook(): def inputhook(context): - global _appref + global _appref, _eventloop, _timer app = QtCore.QCoreApplication.instance() if not app: if sys.platform == 'linux': @@ -55,18 +57,20 @@ def inputhook(context): # formatting and look like "bug in IPython". QtCore.QTimer.singleShot(0, _reclaim_excepthook) - event_loop = QtCore.QEventLoop(app) + if _eventloop is None: + _eventloop = QtCore.QEventLoop(app) if sys.platform == 'win32': # The QSocketNotifier method doesn't appear to work on Windows. # Use polling instead. - timer = QtCore.QTimer() - timer.timeout.connect(event_loop.quit) + if _timer is None: + _timer = QtCore.QTimer() + _timer.timeout.connect(_eventloop.quit) while not context.input_is_ready(): - # NOTE: run the event loop, and after 50 ms, call `quit` to exit it. - timer.start(50) # 50 ms - _exec(event_loop) - timer.stop() + # NOTE: run the event loop, and after 10 ms, call `quit` to exit it. + _timer.start(10) # 10 ms + _exec(_eventloop) + _timer.stop() else: # On POSIX platforms, we can use a file descriptor to quit the event # loop when there is input ready to read. @@ -77,14 +81,10 @@ def inputhook(context): # connect the callback we care about before we turn it on # lambda is necessary as PyQT inspect the function signature to know # what arguments to pass to. See https://github.com/ipython/ipython/pull/12355 - notifier.activated.connect(lambda: event_loop.exit()) + notifier.activated.connect(lambda: _eventloop.exit()) notifier.setEnabled(True) # only start the event loop we are not already flipped if not context.input_is_ready(): - _exec(event_loop) + _exec(_eventloop) finally: notifier.setEnabled(False) - - # This makes sure that the event loop is garbage collected. - # See issue 14240. 
- event_loop.setParent(None) diff --git a/IPython/terminal/shortcuts/__init__.py b/IPython/terminal/shortcuts/__init__.py index ba6d4055262..7b2dd8885b4 100644 --- a/IPython/terminal/shortcuts/__init__.py +++ b/IPython/terminal/shortcuts/__init__.py @@ -24,9 +24,9 @@ from prompt_toolkit.filters import Condition from IPython.core.getipython import get_ipython -from IPython.terminal.shortcuts import auto_match as match -from IPython.terminal.shortcuts import auto_suggest -from IPython.terminal.shortcuts.filters import filter_from_string +from . import auto_match as match +from . import auto_suggest +from .filters import filter_from_string from IPython.utils.decorators import undoc from prompt_toolkit.enums import DEFAULT_BUFFER @@ -203,7 +203,7 @@ def create_identifier(handler: Callable): Binding( auto_suggest.accept, ["right"], - "has_suggestion & default_buffer_focused & emacs_like_insert_mode", + "has_suggestion & default_buffer_focused & emacs_like_insert_mode & is_cursor_at_the_end_of_line", ), Binding( auto_suggest.accept_word, @@ -630,6 +630,7 @@ def win_paste(event): ] UNASSIGNED_ALLOWED_COMMANDS = [ + auto_suggest.llm_autosuggestion, nc.beginning_of_buffer, nc.end_of_buffer, nc.end_of_line, diff --git a/IPython/terminal/shortcuts/auto_suggest.py b/IPython/terminal/shortcuts/auto_suggest.py index 94a94a88c1e..81737c50943 100644 --- a/IPython/terminal/shortcuts/auto_suggest.py +++ b/IPython/terminal/shortcuts/auto_suggest.py @@ -1,9 +1,11 @@ import re +import asyncio import tokenize from io import StringIO -from typing import Callable, List, Optional, Union, Generator, Tuple +from typing import Callable, List, Optional, Union, Generator, Tuple, ClassVar, Any import warnings +import prompt_toolkit from prompt_toolkit.buffer import Buffer from prompt_toolkit.key_binding import KeyPressEvent from prompt_toolkit.key_binding.bindings import named_commands as nc @@ -31,26 +33,127 @@ class AppendAutoSuggestionInAnyLine(Processor): """ Append the auto suggestion to lines other than the last (appending to the last line is natively supported by the prompt toolkit). + + This has a private `_debug` attribute that can be set to True to display + debug information as a virtual suggestion at the end of any line. You can do + so with: + + >>> from IPython.terminal.shortcuts.auto_suggest import AppendAutoSuggestionInAnyLine + >>> AppendAutoSuggestionInAnyLine._debug = True + """ + _debug: ClassVar[bool] = False + def __init__(self, style: str = "class:auto-suggestion") -> None: self.style = style def apply_transformation(self, ti: TransformationInput) -> Transformation: - is_last_line = ti.lineno == ti.document.line_count - 1 - is_active_line = ti.lineno == ti.document.cursor_position_row + """ + Apply transformation to the line that is currently being edited. - if not is_last_line and is_active_line: - buffer = ti.buffer_control.buffer + This is a variation of the original implementation in prompt toolkit + that allows not only appending suggestions to any line, but also showing + multi-line suggestions. - if buffer.suggestion and ti.document.is_cursor_at_the_end_of_line: - suggestion = buffer.suggestion.text - else: - suggestion = "" + As transformations are applied on a line-by-line basis, we need to trick + a bit, and elide any line that is after the line we are currently + editing, until we run out of completions. We cannot shift the existing + lines. + + There are multiple cases to handle: + The completion ends before the end of the buffer: + We can resume showing the normal line, and say that some code may + be hidden. + + The completion ends at the end of the buffer + We can just say that some code may be hidden. + + And separately: + + The completion ends beyond the end of the buffer + We need to both say that some code may be hidden, and that some + lines are not shown. + + """ + last_line_number = ti.document.line_count - 1 + is_last_line = ti.lineno == last_line_number + + noop = lambda text: Transformation( + fragments=ti.fragments + [(self.style, " " + text if self._debug else "")] + ) + if ti.document.line_count == 1: + return noop("noop:oneline") + if ti.document.cursor_position_row == last_line_number and is_last_line: + # prompt toolkit already appends something; just leave it be + return noop("noop:last line and cursor") + + # First, everything before the current line is unchanged. + if ti.lineno < ti.document.cursor_position_row: + return noop("noop:before cursor") + + buffer = ti.buffer_control.buffer + if not buffer.suggestion or not ti.document.is_cursor_at_the_end_of_line: + return noop("noop:not eol") + + delta = ti.lineno - ti.document.cursor_position_row + suggestions = buffer.suggestion.text.splitlines() + + if len(suggestions) == 0: + return noop("noop: no suggestions") + + if prompt_toolkit.VERSION < (3, 0, 49): + if len(suggestions) > 1 and prompt_toolkit.VERSION < (3, 0, 49): + if ti.lineno == ti.document.cursor_position_row: + return Transformation( + fragments=ti.fragments + + [ + ( + "red", + "(Cannot show multiline suggestion; requires prompt_toolkit > 3.0.49)", + ) + ] + ) + else: + return Transformation(fragments=ti.fragments) + elif len(suggestions) == 1: + if ti.lineno == ti.document.cursor_position_row: + return Transformation( + fragments=ti.fragments + [(self.style, suggestions[0])] + ) + return Transformation(fragments=ti.fragments) + + if delta == 0: + suggestion = suggestions[0] return Transformation(fragments=ti.fragments + [(self.style, suggestion)]) + if is_last_line: + if delta < len(suggestions): + suggestion = f"… rest of suggestion ({len(suggestions) - delta} lines) and code hidden" + return Transformation([(self.style, suggestion)]) + + n_elided = len(suggestions) + for i in range(len(suggestions)): + ll = ti.get_line(last_line_number - i) + el = "".join(l[1] for l in ll).strip() + if el: + break + else: + n_elided -= 1 + if n_elided: + return Transformation([(self.style, f"… {n_elided} line(s) hidden")]) + else: + return Transformation( + ti.get_line(last_line_number - len(suggestions) + 1) + + ([(self.style, "shift-last-line")] if self._debug else []) + ) + + elif delta < len(suggestions): + suggestion = suggestions[delta] + return Transformation([(self.style, suggestion)]) else: - return Transformation(fragments=ti.fragments) + shift = ti.lineno - len(suggestions) + 1 + return Transformation(ti.get_line(shift)) class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory): @@ -60,16 +163,34 @@ class NavigableAutoSuggestFromHistory(AutoSuggestFromHistory): state need to carefully be cleared on the right events. """ - def __init__( - self, - ): + skip_lines: int + _connected_apps: list[PromptSession] + + # handle to the currently running llm task that appends suggestions to the + # current buffer; we keep a handle to it in order to cancel it when there is a cursor movement, or + # another request. + _llm_task: asyncio.Task | None = None + + # This is the constructor of the LLM provider from jupyter-ai + # to which we forward the request to generate inline completions. + _init_llm_provider: Callable | None + + _llm_provider_instance: Any | None + _llm_prefixer: Callable = lambda self, x: "wrong" + + def __init__(self): + super().__init__() self.skip_lines = 0 self._connected_apps = [] + self._llm_provider_instance = None + self._init_llm_provider = None + self._request_number = 0 def reset_history_position(self, _: Buffer): self.skip_lines = 0 - def disconnect(self): + def disconnect(self) -> None: + self._cancel_running_llm_task() for pt_app in self._connected_apps: text_insert_event = pt_app.default_buffer.on_text_insert text_insert_event.remove_handler(self.reset_history_position) @@ -94,7 +215,8 @@ def get_suggestion( return None - def _dismiss(self, buffer, *args, **kwargs): + def _dismiss(self, buffer, *args, **kwargs) -> None: + self._cancel_running_llm_task() buffer.suggestion = None def _find_match( @@ -149,6 +271,7 @@ def _find_previous_match(self, text: str, skip_lines: float, history: History): ) def up(self, query: str, other_than: str, history: History) -> None: + self._cancel_running_llm_task() for suggestion, line_number in self._find_next_match( query, self.skip_lines, history ): @@ -165,6 +288,7 @@ def up(self, query: str, other_than: str, history: History) -> None: self.skip_lines = 0 def down(self, query: str, other_than: str, history: History) -> None: + self._cancel_running_llm_task() for suggestion, line_number in self._find_previous_match( query, self.skip_lines, history ): @@ -180,6 +304,160 @@ def down(self, query: str, other_than: str, history: History) -> None: self.skip_lines = line_number break + def _cancel_running_llm_task(self) -> None: + """ + Try to cancel the currently running llm_task, if any, and set it to None. + """ + if self._llm_task is not None: + if self._llm_task.done(): + self._llm_task = None + return + cancelled = self._llm_task.cancel() + if cancelled: + self._llm_task = None + if not cancelled: + warnings.warn( + "LLM task not cancelled, does your provider support cancellation?" + ) + + @property + def _llm_provider(self): + """Lazy-initialized instance of the LLM provider. + + Do not use in the constructor, as `_init_llm_provider` can trigger slow side-effects. + """ + if self._llm_provider_instance is None and self._init_llm_provider: + self._llm_provider_instance = self._init_llm_provider() + return self._llm_provider_instance + + async def _trigger_llm(self, buffer) -> None: + """ + This will ask the current llm provider for a suggestion for the current buffer. + + If there is a currently running llm task, it will cancel it. + """ + # we likely want to store the current cursor position, and cancel if the cursor has moved. + try: + import jupyter_ai_magics + except ModuleNotFoundError: + jupyter_ai_magics = None + if not self._llm_provider: + warnings.warn("No LLM provider found, cannot trigger LLM completions") + return + if jupyter_ai_magics is None: + warnings.warn("LLM Completion requires `jupyter_ai_magics` to be installed") + + self._cancel_running_llm_task() + + async def error_catcher(buffer): + """ + This catches and logs any errors, as otherwise this is just + lost in the void of the future running task. + """ + try: + await self._trigger_llm_core(buffer) + except Exception as e: + get_ipython().log.error("error %s", e) + raise + + # here we need a cancellable task, so we can't just await the error catcher directly + self._llm_task = asyncio.create_task(error_catcher(buffer)) + await self._llm_task + + async def _trigger_llm_core(self, buffer: Buffer): + """ + This is the core of the current llm request. + + Here we build a compatible `InlineCompletionRequest` and ask the llm + provider to stream its response back to us, iteratively setting it as + the suggestion on the current buffer. + + Unlike with JupyterAi, as we do not have multiple cells, the cell id + is always set to `None`. + + We set the prefix to the current cell content, but could also insert the + rest of the history or even just the non-fail history. + + In the same way, we do not have a cell id. + + The LLM provider may return multiple suggestion streams, but for the time + being we only support one. + + Here we make the assumption that the provider will have + stream_inline_completions; I'm not sure it is the case for all + providers. + """ + try: + import jupyter_ai.completions.models as jai_models + except ModuleNotFoundError: + jai_models = None + + if not jai_models: + raise ValueError("jupyter-ai is not installed") + + if not self._llm_provider: + raise ValueError("No LLM provider found, cannot trigger LLM completions") + + hm = buffer.history.shell.history_manager + prefix = self._llm_prefixer(hm) + get_ipython().log.debug("prefix: %s", prefix) + + self._request_number += 1 + request_number = self._request_number + + request = jai_models.InlineCompletionRequest( + number=request_number, + prefix=prefix + buffer.document.text_before_cursor, + suffix=buffer.document.text_after_cursor, + mime="text/x-python", + stream=True, + path=None, + language="python", + cell_id=None, + ) + + async for reply_and_chunks in self._llm_provider.stream_inline_completions( + request ): + if self._request_number != request_number: + # If a new suggestion was requested, skip processing this one. + return + if isinstance(reply_and_chunks, jai_models.InlineCompletionReply): + if len(reply_and_chunks.list.items) > 1: + raise ValueError( + "Terminal IPython cannot deal with multiple LLM suggestions at once" + ) + buffer.suggestion = Suggestion( + reply_and_chunks.list.items[0].insertText + ) + buffer.on_suggestion_set.fire() + elif isinstance(reply_and_chunks, jai_models.InlineCompletionStreamChunk): + buffer.suggestion = Suggestion(reply_and_chunks.response.insertText) + buffer.on_suggestion_set.fire() + return + + +async def llm_autosuggestion(event: KeyPressEvent): + """ + Ask the history AutoSuggester to delegate to an LLM for completion + + This will first make sure that the current buffer has _MIN_LINES (5) + available lines to insert the LLM completion + + Provisional as of 8.32, may change without warnings + + """ + _MIN_LINES = 5 + provider = get_ipython().auto_suggest + if not isinstance(provider, NavigableAutoSuggestFromHistory): + return + doc = event.current_buffer.document + lines_to_insert = max(0, _MIN_LINES - doc.line_count + doc.cursor_position_row) + for _ in range(lines_to_insert): + event.current_buffer.insert_text("\n", move_cursor=False, fire_event=False) + + await provider._trigger_llm(event.current_buffer) + def accept_or_jump_to_end(event: KeyPressEvent): """Apply autosuggestion or jump to end of line.""" diff --git a/IPython/terminal/tests/fake_llm.py b/IPython/terminal/tests/fake_llm.py new file mode 100644 index 00000000000..91c676b8865 --- /dev/null +++ b/IPython/terminal/tests/fake_llm.py @@ -0,0 +1,103 @@ +import asyncio +from time import sleep + +try: + from jupyter_ai_magics.providers import BaseProvider + from langchain_community.llms import FakeListLLM +except ImportError: + + class BaseProvider: + pass + + class FakeListLLM: + pass + + +FIBONACCI = """\ +def fib(n): + if n < 2: return n + return fib(n - 1) + fib(n - 2) +""" + + +class FibonacciCompletionProvider(BaseProvider, FakeListLLM): # type: ignore[misc, valid-type] + + id = "my_provider" + name = "My Provider" + model_id_key = "model" + models = ["model_a"] + + def __init__(self, **kwargs): + kwargs["responses"] = ["This fake response will not be used for completion"] + kwargs["model_id"] = "model_a" + super().__init__(**kwargs) + + async def generate_inline_completions(self, request): + raise ValueError("IPython only supports streaming models.") + + async def stream_inline_completions(self, request): + from jupyter_ai.completions.models import ( + InlineCompletionList, + InlineCompletionReply, + ) + + assert request.number > 0 + token = f"t{request.number}s0" + last_line = request.prefix.splitlines()[-1] + + if not FIBONACCI.startswith(last_line): + return + + yield InlineCompletionReply( + list=InlineCompletionList( + items=[ + {"insertText": "", "isIncomplete": True, "token": token}, + ] + ), + reply_to=request.number, + ) + + async for reply in self._stream( + FIBONACCI[len(last_line) :], + request.number, + token, + ): + yield reply + + async def _stream(self, sentence, request_number, token, start_with=""): + from jupyter_ai.completions.models import InlineCompletionStreamChunk + + suggestion = start_with + + for fragment in sentence.split(" "): + await asyncio.sleep(0.05) + if suggestion: + suggestion += " " + suggestion += fragment + yield InlineCompletionStreamChunk( + type="stream", + response={"insertText": suggestion, "token": token}, + reply_to=request_number, + done=False, + ) + + # finally, send a message confirming that we are done + yield 
InlineCompletionStreamChunk( + type="stream", + response={"insertText": suggestion, "token": token}, + reply_to=request_number, + done=True, + ) + + +class SlowStartingCompletionProvider(BaseProvider, FakeListLLM): # type: ignore[misc, valid-type] + id = "slow_provider" + name = "Slow Provider" + model_id_key = "model" + models = ["model_a"] + + def __init__(self, **kwargs): + kwargs["responses"] = ["This fake response will be used for completion"] + kwargs["model_id"] = "model_a" + sleep(10) + super().__init__(**kwargs) diff --git a/IPython/terminal/tests/test_shortcuts.py b/IPython/terminal/tests/test_shortcuts.py index 3cf2524f848..2436e083cd8 100644 --- a/IPython/terminal/tests/test_shortcuts.py +++ b/IPython/terminal/tests/test_shortcuts.py @@ -1,4 +1,6 @@ import pytest +import time +from IPython.terminal.interactiveshell import PtkHistoryAdapter from IPython.terminal.shortcuts.auto_suggest import ( accept, accept_or_jump_to_end, @@ -7,12 +9,14 @@ accept_word, accept_and_keep_cursor, discard, + llm_autosuggestion, NavigableAutoSuggestFromHistory, swap_autosuggestion_up, swap_autosuggestion_down, ) from IPython.terminal.shortcuts.auto_match import skip_over from IPython.terminal.shortcuts import create_ipython_shortcuts, reset_search_buffer +from IPython.testing import decorators as dec from prompt_toolkit.history import InMemoryHistory from prompt_toolkit.buffer import Buffer @@ -41,6 +45,41 @@ def make_event(text, cursor, suggestion): return event +try: + from .fake_llm import FIBONACCI +except ImportError: + FIBONACCI = "" + + +@dec.skip_without("jupyter_ai") +@pytest.mark.asyncio +async def test_llm_autosuggestion(): + provider = NavigableAutoSuggestFromHistory() + ip = get_ipython() + ip.auto_suggest = provider + ip.llm_provider_class = "IPython.terminal.tests.fake_llm.FibonacciCompletionProvider" + ip.history_manager.get_range = Mock(return_value=[]) + text = "def fib" + event = Mock() + event.current_buffer = Buffer( + history=PtkHistoryAdapter(ip), + ) + event.current_buffer.insert_text(text, move_cursor=True) + await llm_autosuggestion(event) + assert event.current_buffer.suggestion.text == FIBONACCI[len(text) :] + + +def test_slow_llm_provider_should_not_block_init(): + ip = get_ipython() + provider = NavigableAutoSuggestFromHistory() + ip.auto_suggest = provider + start = time.perf_counter() + ip.llm_provider_class = "tests.fake_llm.SlowStartingCompletionProvider" + end = time.perf_counter() + elapsed = end - start + assert elapsed < 0.1 + + @pytest.mark.parametrize( "text, suggestion, expected", [ @@ -226,6 +265,7 @@ def test_other_providers(): assert swap_autosuggestion_down(event) is None +@pytest.mark.asyncio async def test_navigable_provider(): provider = NavigableAutoSuggestFromHistory() history = InMemoryHistory(history_strings=["very_a", "very", "very_b", "very_c"]) @@ -278,6 +318,7 @@ def get_suggestion(): assert get_suggestion().text == "_a" +@pytest.mark.asyncio async def test_navigable_provider_multiline_entries(): provider = NavigableAutoSuggestFromHistory() history = InMemoryHistory(history_strings=["very_a\nvery_b", "very_c"]) diff --git a/docs/autogen_config.py b/docs/autogen_config.py index 5bfa67e0b6f..6d82aca52bf 100755 --- a/docs/autogen_config.py +++ b/docs/autogen_config.py @@ -3,7 +3,6 @@ import inspect from pathlib import Path from IPython.terminal.ipapp import TerminalIPythonApp -from ipykernel.kernelapp import IPKernelApp from traitlets import Undefined from collections import defaultdict @@ -103,13 +102,9 @@ def write_doc(name, title, app, 
preamble=None): trait_aliases = reverse_aliases(app) filename = options / (name + ".rst") with open(filename, "w", encoding="utf-8") as f: - f.write(".. _" + name + "_options:" + "\n\n") - f.write(title + "\n") - f.write(("=" * len(title)) + "\n") f.write("\n") if preamble is not None: f.write(preamble + '\n\n') - #f.write(app.document_config_options()) for c in app._classes_inc_parents(): f.write(class_config_rst_doc(c, trait_aliases)) @@ -121,7 +116,3 @@ def write_doc(name, title, app, preamble=None): Path(generated).write_text("", encoding="utf-8") write_doc('terminal', 'Terminal IPython options', TerminalIPythonApp()) - write_doc('kernel', 'IPython kernel options', IPKernelApp(), - preamble=("These options can be used in :file:`ipython_kernel_config.py`. " - "The kernel also respects any options in `ipython_config.py`"), - ) diff --git a/docs/source/config/details.rst b/docs/source/config/details.rst index 69dad2c80ec..4c2b0117b54 100644 --- a/docs/source/config/details.rst +++ b/docs/source/config/details.rst @@ -1,6 +1,101 @@ -======================= -Specific config details -======================= +============================== +Specific configuration details +============================== + +.. _llm_suggestions: + +LLM Suggestions +=============== + +Starting with 9.0, IPython will be able to use LLM providers to suggest code in +the terminal. This requires a recent version of prompt_toolkit in order to allow +multiline suggestions. There are currently a number of limitations, and feedback +on the API is welcome. + +Unlike many IPython features, this is not enabled by default and requires +multiple configuration options to be set for it to work properly: + + - Set a keybinding to trigger LLM suggestions. Due to terminal limitations + across platforms and emulators, it is difficult to provide a default + keybinding. Note that not all keybindings are available; in particular, + `Ctrl-Enter`, `Alt-backslash` and `Ctrl-Shift-Enter` are not available + without integration with your terminal emulator. + + - Choose an LLM `provider`, usually from Jupyter-AI. This will be the interface + between IPython itself and the LLM – which may be local or on a server. + + - Configure said provider with models, API keys, etc – this will depend on the + provider, and you will have to refer to the Jupyter-AI documentation and/or your + LLM documentation. + + +While setting up IPython to use a real LLM, you can refer to +``examples/auto_suggest_llm.py``, which provides an example of how to set up +IPython to use a fake LLM provider; this can help ensure that the full setup is +working before switching to a real LLM provider. + + +Set up a keybinding +------------------- + +You may want to refer to the documentation on how to set up a keybinding in IPython, but in short you +want to bind the ``IPython:auto_suggest.llm_autosuggestion`` command to a +keybinding, and have it active only when the default buffer is focused, and +when using the NavigableSuggestions suggester (this is the default suggester, +the one that is history and LLM aware). Thus the ``navigable_suggestions & +default_buffer_focused`` filter should be used. + +Usually ``Ctrl-Q`` on macOS is an available shortcut; note that it does use +``Ctrl``, and not ``Command``. 
+ +The following example will bind ``Ctrl-Q`` to the ``llm_autosuggestion`` +command, with the suggested filter:: + + c.TerminalInteractiveShell.shortcuts = [ + { + "new_keys": ["c-q"], + "command": "IPython:auto_suggest.llm_autosuggestion", + "new_filter": "navigable_suggestions & default_buffer_focused", + "create": True, + }, + ] + + +Choose an LLM provider +---------------------- + +Set the ``TerminalInteractiveShell.llm_provider_class`` trait to the fully +qualified name of the provider you like. When testing from inside the IPython +source tree, you can use +``"examples.auto_suggest_llm.ExampleCompletionProvider"``. This will always +stream an extract of *The Little Prince* by Antoine de Saint-Exupéry, and will not +require any API key or real LLM. + + +In your configuration file, adapt the following line to your needs: + +.. code-block:: python + + c.TerminalInteractiveShell.llm_provider_class = "examples.auto_suggest_llm.ExampleCompletionProvider" + +Configure the provider +---------------------- + +If the provider needs to be passed parameters at initialization, you can do so +by setting the ``llm_provider_kwargs`` traitlet. + +.. code-block:: python + + c.TerminalInteractiveShell.llm_provider_kwargs = {"model": "skynet"} + +This will depend on the provider you chose, and you will have to refer to +the provider's documentation. + +Extra configuration may also be needed via environment variables; this will +again depend on the provider you chose, and you will have to refer to the +provider's documentation. + + .. _custom_prompts: diff --git a/docs/source/config/inputtransforms.rst b/docs/source/config/inputtransforms.rst index 33f14887d6a..222d113d1cf 100644 --- a/docs/source/config/inputtransforms.rst +++ b/docs/source/config/inputtransforms.rst @@ -13,7 +13,7 @@ interactive interface. Using them carelessly can easily break IPython! String based transformations ============================ -.. currentmodule:: IPython.core.inputtransforms +.. currentmodule:: IPython.core.inputtransformers2 When the user enters code, it is first processed as a string. By the end of this stage, it must be valid Python syntax. diff --git a/docs/source/config/options/index.rst b/docs/source/config/options/index.rst index e8fb9b24c18..4330e39f0e3 100644 --- a/docs/source/config/options/index.rst +++ b/docs/source/config/options/index.rst @@ -1,12 +1,10 @@ -=============== -IPython options -=============== +.. _terminal_options: + +Terminal options +================ Any of the options listed here can be set in config files, at the command line, from inside IPython, or using a traitlets :class:`Config` object. See :ref:`setting_config` for details. -.. toctree:: - - terminal - kernel +.. include:: terminal.rst diff --git a/docs/source/whatsnew/version8.rst b/docs/source/whatsnew/version8.rst index 310a03b70b5..346ee53ae6c 100644 --- a/docs/source/whatsnew/version8.rst +++ b/docs/source/whatsnew/version8.rst @@ -1,6 +1,156 @@ ============ 8.x Series ============ + +.. _version 8.37: + +IPython 8.37 +============ + +This release includes fixes for the tab completer and the LLM completer, backported from IPython 9.4: + +- :ghpull:`14910` Eliminate startup delay when LLM completion provider is configured +- :ghpull:`14898` Fix attribute completion for expressions with comparison operators + +..
_version 8.36: + +IPython 8.36 +============ + +This is a small release with minor changes in the context passed to the LLM completion +provider and a fix for interruption of execution magics: + +- :ghpull:`14890` Fixed interruption of ``%%time`` and ``%%debug`` magics +- :ghpull:`14877` Removed spurious empty lines from ``prefix`` passed to LLM, and separated part after cursor into the ``suffix`` + +.. _version 8.35: + +IPython 8.35 +============ + +This small early April release includes a few backports of bug fixes for tab and LLM completions: + +- :ghpull:`14838` Fixed tab-completion of global variables in lines with a dot when jedi is off +- :ghpull:`14846` Fixed LLM request number always being set to zero and removed spurious logging +- :ghpull:`14851` Passes current input history to LLMs + + .. _version 8.34: + +IPython 8.34 +============ + +This tiny beginning of March release included two bug fixes: + +- :ghpull:`14823` Fixed right arrow incorrectly accepting invisible auto-suggestions +- :ghpull:`14828` Fixed Qt backend crash + +along with a backport of improved documentation and configurability of LLM completions. + +.. _version 8.33: + +IPython 8.33 +============ + +This small end of February release included a few backports of bug fixes and minor enhancements: + +- :ghpull:`14717` Fixed auto-suggestion on Prompt Toolkit < 3.0.49 +- :ghpull:`14738` Fixed Python 3.13 compatibility of ``local_ns`` +- :ghpull:`14700` Improved Qt object management and performance +- :ghpull:`14790` Better documentation and configurability of LLM completions + + .. _version 8.32: + +IPython 8.32 +============ + +A medium-sized release of IPython for this end of January and the new year 2025. +This is now a different branch from the main branch, and will only accumulate +bugfixes, small improvements, and requested backports, while 9.0 forges ahead. + + +We'll note 4 bugfixes and an unstable preview feature. + +- :ghpull:`14640` Fixes an error where a warning about virtualenvs was incorrectly triggered. + +- :ghpull:`14684` Fixes an issue on Python 3.12+ with post-mortem debugging. +- :ghpull:`14693` Fixes a bug where magics were ignoring SIGINT +- :ghpull:`14695` Fixes an issue where magics would not display all subprocess output. + + +As a preview feature, look into the ``examples`` folder to see how to configure +autosuggestion using a large language model. This is not yet ready for +production use, has only partial documentation and can change without warning, +but should allow you to hook in a Jupyter-AI LLM provider to suggest code in the +CLI (currently only via a keyboard shortcut). See :ghpull:`14623` for more details +until further documentation is available in 9.0. + + +As usual you can find the full list of PRs on GitHub under `the 8.32 +`__ milestone. + +For something completely different +---------------------------------- + +`Dora Rudolfine Richter `__ +(1892–1966) was a German trans woman and the first known person to undergo +complete male-to-female gender-affirming surgery. After fleeing Nazi Germany, +she lived in Czechoslovakia, where she obtained her full legal name change in +April 1934. She moved back to Germany in 1946 with the expulsion of Germans from +Czechoslovakia, and lived there until her death at the age of 74 in April 1966. + +Thanks +------ + +Thanks to everyone who helped with the 8.32 release and worked toward 9.0. + +Thanks as well to the `D. E. Shaw group `__ for sponsoring +work on IPython and related libraries, in particular the work around LLM +integration. + +..
_version 8.31: + +IPython 8.31 +============ + +Small release for this end of December; it contains only two notable changes: + + - :ghpull:`14594` Fix completion in tuples, where the completion was + suggesting tuple methods instead of methods and attributes of the current + element. + - :ghpull:`14598` Fix pdb issue with Python 3.13.1; this fix is not perfect + (see :ghissue:`14620`) but should be sufficient for now. + + +As usual you can find the full list of PRs on GitHub under `the 8.31 +`__ milestone. + + +Road to 9.0 +----------- + +The current main branch is now on its way to becoming 9.0; do not expect many new +features, but rather a large codebase refactor and cleanup, with much deprecated +code removed and more aggressive code-style enforcement. + +For something completely different +---------------------------------- + +`Emmy Noether `__ was a German +mathematician who made groundbreaking contributions to abstract algebra and +theoretical physics. Her work on Noether's theorem, which describes the +relationship between symmetries and conservation laws, has had a profound impact +on the development of modern physics. Noether's work was largely overlooked +during her lifetime, but her legacy as one of the most important mathematicians +of the 20th century has been recognized and celebrated in recent years. + +Thanks +------ + +Thanks as well to the `D. E. Shaw group `__ for sponsoring +work on IPython and related libraries. + .. _version 8.30: IPython 8.30 @@ -977,7 +1127,7 @@ properties: Note that while in the above example we use a static dictionary, libraries may decide to use a custom object that define ``__getitem__``, we caution against using objects that would trigger computation to show documentation, but it is -sometime preferable for highly dynamic code that for example export ans API as +sometimes preferable for highly dynamic code that, for example, exports an API as an object. diff --git a/examples/IPython Kernel/Capturing Output.ipynb b/examples/IPython Kernel/Capturing Output.ipynb index b04580bd712..0d37f4df4f4 100644 --- a/examples/IPython Kernel/Capturing Output.ipynb +++ b/examples/IPython Kernel/Capturing Output.ipynb @@ -11,7 +11,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "IPython has a [cell magic](Cell Magics.ipynb), `%%capture`, which captures the stdout/stderr of a cell. With this magic you can discard these streams or store them in a variable." + "IPython has a cell magic, `%%capture`, which captures the stdout/stderr of a cell. With this magic you can discard these streams or store them in a variable."
] }, { diff --git a/examples/IPython Kernel/Custom Display Logic.ipynb b/examples/IPython Kernel/Custom Display Logic.ipynb index 447c34ad513..feb6778c831 100644 --- a/examples/IPython Kernel/Custom Display Logic.ipynb +++ b/examples/IPython Kernel/Custom Display Logic.ipynb @@ -18,7 +18,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "As described in the [Rich Output](Rich Output.ipynb) tutorial, the IPython display system can display rich representations of objects in the following formats:\n", + "As described in the Rich Output tutorial, the IPython display system can display rich representations of objects in the following formats:\n", "\n", "* JavaScript\n", "* HTML\n", diff --git a/examples/IPython Kernel/Index.ipynb b/examples/IPython Kernel/Index.ipynb index 6da3e93d202..83de22c778d 100644 --- a/examples/IPython Kernel/Index.ipynb +++ b/examples/IPython Kernel/Index.ipynb @@ -39,12 +39,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "* [Cell Magics](Cell Magics.ipynb)\n", - "* [Script Magics](Script Magics.ipynb)\n", - "* [Rich Output](Rich Output.ipynb)\n", - "* [Custom Display Logic](Custom Display Logic.ipynb)\n", - "* [Plotting in the Notebook](Plotting in the Notebook.ipynb)\n", - "* [Capturing Output](Capturing Output.ipynb)" + "* Cell Magics\n", + "* Script Magics\n", + "* Rich Output\n", + "* Custom Display Logic\n", + "* Plotting in the Notebook\n", + "* Capturing Output" ] }, { @@ -58,11 +58,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "* [Background Jobs](Background Jobs.ipynb)\n", - "* [Trapezoid Rule](Trapezoid Rule.ipynb)\n", + "* Background Jobs\n", + "* Trapezoidal Rule\n", "* [SymPy](SymPy.ipynb)\n", - "* [Raw Input in the Notebook](Raw Input in the Notebook.ipynb)\n", - "* [Importing Notebooks](Importing Notebooks.ipynb)" + "* Raw Input in the Notebook\n", + "* Importing Notebooks" ] }, { diff --git a/examples/Index.ipynb b/examples/Index.ipynb index 30e201a7e31..469253aed0a 100644 --- a/examples/Index.ipynb +++ b/examples/Index.ipynb @@ -32,7 +32,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "* [IPython Kernel](IPython Kernel/Index.ipynb): IPython's core syntax and command line features available in all frontends\n", + "* IPython Kernel: IPython's core syntax and command line features available in all frontends\n", "* [Embedding](Embedding/Index.ipynb): Embedding and reusing IPython's components into other applications\n" ] } diff --git a/examples/auto_suggest_llm.py b/examples/auto_suggest_llm.py new file mode 100644 index 00000000000..824ed75ae7e --- /dev/null +++ b/examples/auto_suggest_llm.py @@ -0,0 +1,173 @@ +""" +This is an example of a fake LLM completer for IPython, as +well as an example of how to configure IPython for LLMs. + +8.32 – this is provisional and may change.
+ +To test this you can run the following command from the root of the IPython +directory: + + $ ipython --TerminalInteractiveShell.llm_provider_class=examples.auto_suggest_llm.ExampleCompletionProvider + +Or you can set the value in your config file, which also allows you to set a +keyboard shortcut:: + + c.TerminalInteractiveShell.llm_provider_class = "examples.auto_suggest_llm.ExampleCompletionProvider" + c.TerminalInteractiveShell.shortcuts = [ + { + "new_keys": ["c-q"], + "command": "IPython:auto_suggest.llm_autosuggestion", + "new_filter": "navigable_suggestions & default_buffer_focused", + "create": True, + }, + ] + + +You can use the following configuration option to pass keyword arguments to the +provider's constructor:: + + c.TerminalInteractiveShell.llm_constructor_kwargs = {"model_id": "mymodel"} + + +For convenience and testing, you can bind a shortcut at runtime:: + + In [1]: from examples.auto_suggest_llm import setup_shortcut + ...: setup_shortcut('c-q') + + +Getting access to history content +--------------------------------- + +This uses the same providers as Jupyter-AI. In Jupyter-AI, providers may get +access to the current notebook content to pass to the LLM as context. + +The Jupyter-AI documentation describes how to get such context: + +https://jupyter-ai.readthedocs.io/en/latest/developers/index.html + + +When reusing these models you may want to pass them more context in IPython as +well. To do so, you can set +`c.TerminalInteractiveShell.llm_prefix_from_history` to `"no_prefix"`, +`"input_history"`, or the fully qualified name of a function that will get +imported, get passed a `HistoryManager`, and return a prefix to be added to the +LLM context. + + +For more flexibility, subclass the provider, and access the history of IPython +via: + + ``` + ip = get_ipython() + hm = ip.history_manager # the HistoryManager instance (an attribute, not a method) + hm.get_range(...) # will let you select how many input/output... etc.
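+ + # A hypothetical custom prefix function for `llm_prefix_from_history`; + # this is only a sketch, and `last_session_prefix` is an illustrative name: + def last_session_prefix(history_manager): + # join the current session's inputs into a single context prefix + return "\n".join( + entry[2] for entry in history_manager.get_range(output=False) + )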
+ ``` + +""" + +import asyncio +import textwrap +from typing import Any, AsyncIterable, AsyncIterator + +from jupyter_ai_magics.models.completion import ( + InlineCompletionList, + InlineCompletionReply, + InlineCompletionRequest, + InlineCompletionStreamChunk, +) +from jupyter_ai_magics.providers import BaseProvider +from langchain_community.llms import FakeListLLM + + +from IPython.terminal.shortcuts.filters import ( + navigable_suggestions, + default_buffer_focused, +) +from IPython.terminal.shortcuts.auto_suggest import llm_autosuggestion + + +def setup_shortcut(seq): + import IPython + + ip = IPython.get_ipython() + ip.pt_app.key_bindings.add_binding( + seq, filter=(navigable_suggestions & default_buffer_focused) + )(llm_autosuggestion), + + +class ExampleCompletionProvider(BaseProvider, FakeListLLM): # type: ignore[misc, valid-type] + """ + This is an example Fake LLM provider for IPython + + As of 8.32 this is provisional and may change without any warnings + """ + + id = "my_provider" + name = "My Provider" + model_id_key = "model" + models = ["model_a"] + + def __init__(self, **kwargs: Any): + kwargs["responses"] = ["This fake response will not be used for completion"] + kwargs["model_id"] = "model_a" + super().__init__(**kwargs) + + async def generate_inline_completions( + self, request: InlineCompletionRequest + ) -> InlineCompletionReply: + raise ValueError("IPython 8.32 only support streaming models for now.") + + async def stream_inline_completions( + self, request: InlineCompletionRequest + ) -> AsyncIterator[InlineCompletionStreamChunk]: + token_1 = f"t{request.number}s0" + + yield InlineCompletionReply( + list=InlineCompletionList( + items=[ + {"insertText": "It", "isIncomplete": True, "token": token_1}, + ] + ), + reply_to=request.number, + ) + + reply: InlineCompletionStreamChunk + async for reply in self._stream( + textwrap.dedent( + """ + was then that the fox appeared. + “Good morning,” said the fox. + “Good morning,” the little prince responded politely, although when he turned around he saw nothing. + “I am right here,” the voice said, “under the apple tree.” + “Who are you?” asked the little prince, and added, “You are very pretty to look at.” + “I am a fox,” said the fox. + “Come and play with me,” proposed the little prince. “I am so unhappy.” + """ + ).strip(), + request.number, + token_1, + start_with="It", + ): + yield reply + + async def _stream( + self, sentence: str, request_number: int, token: str, start_with: str = "" + ) -> AsyncIterable[InlineCompletionStreamChunk]: + suggestion = start_with + + for fragment in sentence.split(" "): + await asyncio.sleep(0.05) + suggestion += " " + fragment + yield InlineCompletionStreamChunk( + type="stream", + response={"insertText": suggestion, "token": token}, + reply_to=request_number, + done=False, + ) + + # finally, send a message confirming that we are done + yield InlineCompletionStreamChunk( + type="stream", + response={"insertText": suggestion, "token": token}, + reply_to=request_number, + done=True, + ) diff --git a/pyproject.toml b/pyproject.toml index e973f03de77..3c8aa5a8cb8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,6 +104,7 @@ test = [ test_extra = [ "ipython[test]", "curio", + "jupyter_ai", "matplotlib!=3.2.0", "nbformat", "numpy>=1.23",