Mirror of https://github.com/neovim/neovim.git
Merge pull request #3944 from bfredl/detach
job control: add `detach` option and `jobpid` function, and tear down PTY processes correctly.
Commit 297075bf47
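For orientation, a minimal sketch of the Vim script API this change exposes; the `cat` command and the empty options dict are only illustrative:

    " start a job, query its OS pid with the new jobpid(), then stop it
    let j = jobstart(['cat', '-'], {})
    echo jobpid(j)        " process id of the spawned process
    call jobstop(j)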
@@ -83,6 +83,7 @@ function! s:clipboard.set(lines, regtype, reg)
   end
   let selection.data = [a:lines, a:regtype]
   let argv = split(s:copy[a:reg], " ")
+  let selection.detach = s:cache_enabled
   let jobid = jobstart(argv, selection)
   if jobid <= 0
     echohl WarningMsg
@@ -1894,6 +1894,7 @@ isdirectory( {directory})      Number  TRUE if {directory} is a directory
 islocked( {expr})              Number  TRUE if {expr} is locked
 items( {dict})                 List    key-value pairs in {dict}
 jobclose({job}[, {stream}])    Number  Closes a job stream(s)
+jobpid({job})                  Number  Returns pid of a job.
 jobresize({job}, {width}, {height})
                                Number  Resize {job}'s pseudo terminal window
 jobsend({job}, {data})         Number  Writes {data} to {job}'s stdin
@@ -4157,6 +4158,9 @@ jobclose({job}[, {stream}])                     {Nvim} *jobclose()*
         Close {job}'s {stream}, which can be one of "stdin", "stdout" or
         "stderr". If {stream} is omitted, all streams are closed.

+jobpid({job})                                            {Nvim} *jobpid()*
+        Return the pid (process id) of {job}.
+
 jobresize({job}, {width}, {height})                      {Nvim} *jobresize()*
         Resize {job}'s pseudo terminal window to {width} and {height}.
         This function will fail if used on jobs started without the
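A brief usage sketch of the stream functions documented above, assuming the job was started with the pty option; the `sh` command and the window size are placeholders:

    " start a shell on a pseudo terminal, resize it, write to its stdin, then close the stream
    let j = jobstart(['sh'], {'pty': 1})
    call jobresize(j, 80, 24)
    call jobsend(j, "ls\n")
    call jobclose(j, 'stdin')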
@@ -4194,6 +4198,10 @@ jobstart({cmd}[, {opts}])                      {Nvim} *jobstart()*
             - width: Width of the terminal screen(only if pty is set)
             - height: Height of the terminal screen(only if pty is set)
             - TERM: $TERM environment variable(only if pty is set)
+            - detach: Detach the job process from the nvim process. The
+              process won't get killed when nvim exits. If the process
+              dies before nvim exits, on_exit will still be invoked.
+              This option is only allowed for non-pty jobs.
         Either funcrefs or function names can be passed as event
         handlers. The {opts} object is also used as the "self"
         argument for the callback, so the caller may pass arbitrary
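A hedged sketch of the new detach flag described above; `sleep` stands in for any long-running command and `MyExitHandler` is a hypothetical callback name:

    " the child is not killed when nvim exits; on_exit still fires if the child dies first
    let j = jobstart(['sleep', '1000'], {'detach': 1, 'on_exit': 'MyExitHandler'})
    echo jobpid(j)   " the detached child's pid, e.g. for later cleanup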
@@ -7236,6 +7236,7 @@ static struct fst {
   { "islocked",          1, 1, f_islocked },
   { "items",             1, 1, f_items },
   { "jobclose",          1, 2, f_jobclose },
+  { "jobpid",            1, 1, f_jobpid },
   { "jobresize",         3, 3, f_jobresize },
   { "jobsend",           2, 2, f_jobsend },
   { "jobstart",          1, 2, f_jobstart },
@@ -11611,6 +11612,31 @@ static void f_jobclose(typval_T *argvars, typval_T *rettv)
   }
 }

+// "jobpid(id)" function
+static void f_jobpid(typval_T *argvars, typval_T *rettv)
+{
+  rettv->v_type = VAR_NUMBER;
+  rettv->vval.v_number = 0;
+
+  if (check_restricted() || check_secure()) {
+    return;
+  }
+
+  if (argvars[0].v_type != VAR_NUMBER) {
+    EMSG(_(e_invarg));
+    return;
+  }
+
+  TerminalJobData *data = find_job(argvars[0].vval.v_number);
+  if (!data) {
+    EMSG(_(e_invjob));
+    return;
+  }
+
+  Process *proc = (Process *)&data->proc;
+  rettv->vval.v_number = proc->pid;
+}
+
 // "jobsend()" function
 static void f_jobsend(typval_T *argvars, typval_T *rettv)
 {
@@ -11771,8 +11797,9 @@ static void f_jobstart(typval_T *argvars, typval_T *rettv)
   }

   bool pty = job_opts && get_dict_number(job_opts, (uint8_t *)"pty") != 0;
+  bool detach = job_opts && get_dict_number(job_opts, (uint8_t *)"detach") != 0;
   TerminalJobData *data = common_job_init(argv, on_stdout, on_stderr, on_exit,
-                                          job_opts, pty);
+                                          job_opts, pty, detach);
   Process *proc = (Process *)&data->proc;

   if (pty) {
@@ -16776,7 +16803,7 @@ static void f_termopen(typval_T *argvars, typval_T *rettv)
   }

   TerminalJobData *data = common_job_init(argv, on_stdout, on_stderr, on_exit,
-                                          job_opts, true);
+                                          job_opts, true, false);
   data->proc.pty.width = curwin->w_width;
   data->proc.pty.height = curwin->w_height;
   data->proc.pty.term_name = xstrdup("xterm-256color");
@@ -21782,8 +21809,13 @@ char_u *do_string_sub(char_u *str, char_u *pat, char_u *sub, char_u *flags)
   return ret;
 }

-static inline TerminalJobData *common_job_init(char **argv, ufunc_T *on_stdout,
-    ufunc_T *on_stderr, ufunc_T *on_exit, dict_T *self, bool pty)
+static inline TerminalJobData *common_job_init(char **argv,
+                                               ufunc_T *on_stdout,
+                                               ufunc_T *on_stderr,
+                                               ufunc_T *on_exit,
+                                               dict_T *self,
+                                               bool pty,
+                                               bool detach)
 {
   TerminalJobData *data = xcalloc(1, sizeof(TerminalJobData));
   data->stopped = false;
@@ -21806,6 +21838,7 @@ static inline TerminalJobData *common_job_init(char **argv, ufunc_T *on_stdout,
   }
   proc->cb = on_process_exit;
   proc->events = data->events;
+  proc->detach = detach;
   return data;
 }

@@ -21833,8 +21866,13 @@ static inline bool common_job_callbacks(dict_T *vopts, ufunc_T **on_stdout,

 static inline bool common_job_start(TerminalJobData *data, typval_T *rettv)
 {
-  data->refcount++;
   Process *proc = (Process *)&data->proc;
+  if (proc->type == kProcessTypePty && proc->detach) {
+    EMSG2(_(e_invarg2), "terminal/pty job cannot be detached");
+    return false;
+  }
+
+  data->refcount++;
   char *cmd = xstrdup(proc->argv[0]);
   if (!process_spawn(proc)) {
     EMSG2(_(e_jobspawn), cmd);
@@ -21,6 +21,9 @@ bool libuv_process_spawn(LibuvProcess *uvproc)
   uvproc->uvopts.args = proc->argv;
   uvproc->uvopts.flags = UV_PROCESS_WINDOWS_HIDE
                          | UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS;
+  if (proc->detach) {
+    uvproc->uvopts.flags |= UV_PROCESS_DETACHED;
+  }
   uvproc->uvopts.exit_cb = exit_cb;
   uvproc->uvopts.cwd = NULL;
   uvproc->uvopts.env = NULL;
@@ -29,6 +29,7 @@
     } \
   } while (0)

+static bool process_is_tearing_down = false;

 bool process_spawn(Process *proc) FUNC_ATTR_NONNULL_ALL
 {
@@ -112,11 +113,22 @@ bool process_spawn(Process *proc) FUNC_ATTR_NONNULL_ALL

 void process_teardown(Loop *loop) FUNC_ATTR_NONNULL_ALL
 {
+  process_is_tearing_down = true;
   kl_iter(WatcherPtr, loop->children, current) {
     Process *proc = (*current)->data;
-    uv_kill(proc->pid, SIGTERM);
-    proc->term_sent = true;
-    process_stop(proc);
+    if (proc->detach) {
+      // Close handles to process without killing it.
+      CREATE_EVENT(loop->events, process_close_handles, 1, proc);
+    } else {
+      if (proc->type == kProcessTypeUv) {
+        uv_kill(proc->pid, SIGTERM);
+        proc->term_sent = true;
+        process_stop(proc);
+      } else {  // kProcessTypePty
+        process_close_streams(proc);
+        pty_process_close_master((PtyProcess *)proc);
+      }
+    }
   }

   // Wait until all children exit
@@ -303,6 +315,10 @@ static void decref(Process *proc)
 static void process_close(Process *proc)
   FUNC_ATTR_NONNULL_ARG(1)
 {
+  if (process_is_tearing_down && proc->detach && proc->closed) {
+    // If a detached process dies while tearing down it might get closed twice.
+    return;
+  }
   assert(!proc->closed);
   proc->closed = true;
   switch (proc->type) {
@@ -333,6 +349,7 @@ static void on_process_exit(Process *proc)
     DLOG("Stopping process kill timer");
     uv_timer_stop(&loop->children_kill_timer);
   }
+
   // Process handles are closed in the next event loop tick. This is done to
   // give libuv more time to read data from the OS after the process exits(If
   // process_close_streams is called with data still in the OS buffer, we lose
@@ -25,7 +25,7 @@ struct process {
   Stream *in, *out, *err;
   process_exit_cb cb;
   internal_process_cb internal_exit_cb, internal_close_cb;
-  bool closed, term_sent;
+  bool closed, term_sent, detach;
   Queue *events;
 };

@@ -48,7 +48,8 @@ static inline Process process_init(Loop *loop, ProcessType type, void *data)
     .closed = false,
     .term_sent = false,
     .internal_close_cb = NULL,
-    .internal_exit_cb = NULL
+    .internal_exit_cb = NULL,
+    .detach = false
   };
 }

@@ -142,6 +142,35 @@ describe('jobs', function()
     nvim('command', "call jobstart(['cat', '-'], g:job_opts)")
   end)

+  it('can get the pid value using getpid', function()
+    nvim('command', "let j = jobstart(['cat', '-'], g:job_opts)")
+    local pid = eval('jobpid(j)')
+    eq(0,os.execute('ps -p '..pid..' > /dev/null'))
+    nvim('command', 'call jobstop(j)')
+    eq({'notification', 'exit', {0, 0}}, next_msg())
+    neq(0,os.execute('ps -p '..pid..' > /dev/null'))
+  end)
+
+  it("doesn't survive the exit of nvim", function()
+    -- use sleep, which doesn't die on stdin close
+    nvim('command', "let j = jobstart(['sleep', '1000'], g:job_opts)")
+    local pid = eval('jobpid(j)')
+    eq(0,os.execute('ps -p '..pid..' > /dev/null'))
+    clear()
+    neq(0,os.execute('ps -p '..pid..' > /dev/null'))
+  end)
+
+  it('can survive the exit of nvim with "detach"', function()
+    nvim('command', 'let g:job_opts.detach = 1')
+    nvim('command', "let j = jobstart(['sleep', '1000'], g:job_opts)")
+    local pid = eval('jobpid(j)')
+    eq(0,os.execute('ps -p '..pid..' > /dev/null'))
+    clear()
+    eq(0,os.execute('ps -p '..pid..' > /dev/null'))
+    -- clean up after ourselves
+    os.execute('kill -9 '..pid..' > /dev/null')
+  end)
+
   it('can pass user data to the callback', function()
     nvim('command', 'let g:job_opts.user = {"n": 5, "s": "str", "l": [1]}')
     nvim('command', "call jobstart(['echo'], g:job_opts)")