Merge pull request #27655 from bfredl/mpack_obj

refactor(msgpack): allow flushing buffer while packing msgpack
bfredl 2024-03-08 08:57:09 +01:00 committed by GitHub
commit 55c9e2c96e
18 changed files with 560 additions and 603 deletions


@ -105,6 +105,14 @@ typedef enum {
kObjectTypeTabpage,
} ObjectType;
/// Value by which objects represented as EXT type are shifted
///
/// Subtracted when packing, added when unpacking. Used to allow moving
/// buffer/window/tabpage block inside ObjectType enum. This block cannot yet be
/// split or reordered.
#define EXT_OBJECT_TYPE_SHIFT kObjectTypeBuffer
#define EXT_OBJECT_TYPE_MAX (kObjectTypeTabpage - EXT_OBJECT_TYPE_SHIFT)
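
A minimal sketch (hypothetical helpers, not part of the diff) of the mapping these defines imply when a Buffer/Window/Tabpage handle is packed as a msgpack EXT value and unpacked again; it assumes the three entries stay contiguous in ObjectType and that <assert.h> is available:

static inline int8_t ext_type_from_object(ObjectType type)
{
  return (int8_t)(type - EXT_OBJECT_TYPE_SHIFT);  // Buffer -> 0, Window -> 1, Tabpage -> 2
}

static inline ObjectType object_type_from_ext(int8_t ext_type)
{
  assert(0 <= ext_type && ext_type <= EXT_OBJECT_TYPE_MAX);
  return (ObjectType)(ext_type + EXT_OBJECT_TYPE_SHIFT);
}
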
struct object {
ObjectType type;
union {


@ -29,7 +29,6 @@
#include "nvim/memory.h"
#include "nvim/memory_defs.h"
#include "nvim/message.h"
#include "nvim/msgpack_rpc/helpers.h"
#include "nvim/msgpack_rpc/unpacker.h"
#include "nvim/pos_defs.h"
#include "nvim/types_defs.h"
@ -984,7 +983,7 @@ Dictionary api_keydict_to_dict(void *value, KeySetLink *table, size_t max_size,
val = DICTIONARY_OBJ(*(Dictionary *)mem);
} else if (field->type == kObjectTypeBuffer || field->type == kObjectTypeWindow
|| field->type == kObjectTypeTabpage) {
val.data.integer = *(Integer *)mem;
val.data.integer = *(handle_T *)mem;
val.type = field->type;
} else if (field->type == kObjectTypeLuaRef) {
// do nothing


@ -12,6 +12,7 @@
#include "nvim/api/private/helpers.h"
#include "nvim/api/private/validate.h"
#include "nvim/api/ui.h"
#include "nvim/assert_defs.h"
#include "nvim/autocmd.h"
#include "nvim/autocmd_defs.h"
#include "nvim/channel.h"
@ -33,12 +34,12 @@
#include "nvim/memory_defs.h"
#include "nvim/msgpack_rpc/channel.h"
#include "nvim/msgpack_rpc/channel_defs.h"
#include "nvim/msgpack_rpc/helpers.h"
#include "nvim/msgpack_rpc/packer.h"
#include "nvim/option.h"
#include "nvim/types_defs.h"
#include "nvim/ui.h"
#define BUF_POS(data) ((size_t)((data)->buf_wptr - (data)->buf))
#define BUF_POS(data) ((size_t)((data)->packer.ptr - (data)->packer.startptr))
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "api/ui.c.generated.h"
@ -47,55 +48,6 @@
static PMap(uint64_t) connected_uis = MAP_INIT;
#define mpack_w(b, byte) *(*(b))++ = (char)(byte);
static void mpack_w2(char **b, uint32_t v)
{
*(*b)++ = (char)((v >> 8) & 0xff);
*(*b)++ = (char)(v & 0xff);
}
static void mpack_w4(char **b, uint32_t v)
{
*(*b)++ = (char)((v >> 24) & 0xff);
*(*b)++ = (char)((v >> 16) & 0xff);
*(*b)++ = (char)((v >> 8) & 0xff);
*(*b)++ = (char)(v & 0xff);
}
static void mpack_uint(char **buf, uint32_t val)
{
if (val > 0xffff) {
mpack_w(buf, 0xce);
mpack_w4(buf, val);
} else if (val > 0xff) {
mpack_w(buf, 0xcd);
mpack_w2(buf, val);
} else if (val > 0x7f) {
mpack_w(buf, 0xcc);
mpack_w(buf, val);
} else {
mpack_w(buf, val);
}
}
static void mpack_bool(char **buf, bool val)
{
mpack_w(buf, 0xc2 | (val ? 1 : 0));
}
static void mpack_array(char **buf, uint32_t len)
{
if (len < 0x10) {
mpack_w(buf, 0x90 | len);
} else if (len < 0x10000) {
mpack_w(buf, 0xdc);
mpack_w2(buf, len);
} else {
mpack_w(buf, 0xdd);
mpack_w4(buf, len);
}
}
static char *mpack_array_dyn16(char **buf)
{
mpack_w(buf, 0xdc);
@ -104,9 +56,9 @@ static char *mpack_array_dyn16(char **buf)
return pos;
}
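
The dyn16 helper reserves a 16-bit length that is filled in later with mpack_w2. A hedged, self-contained sketch of that backpatching pattern, using the helpers defined in this file (buffer size and loop are illustrative):

char scratch[64];
char *wptr = scratch;
char *count_pos = mpack_array_dyn16(&wptr);  // writes 0xdc plus a 2-byte placeholder, returns its position
uint32_t nelem = 0;
for (uint32_t v = 0; v < 5; v++) {           // pack however many elements turn out to exist
  mpack_uint(&wptr, v);
  nelem++;
}
mpack_w2(&count_pos, nelem);                 // backpatch the real element count
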
static void mpack_str(char **buf, const char *str, size_t len)
static void mpack_str_small(char **buf, const char *str, size_t len)
{
assert(sizeof(schar_T) - 1 < 0x20);
assert(len < 0x20);
mpack_w(buf, 0xa0 | len);
memcpy(*buf, str, len);
*buf += len;
@ -117,6 +69,7 @@ static void remote_ui_destroy(UI *ui)
{
UIData *data = ui->data;
kv_destroy(data->call_buf);
xfree(data->packer.startptr);
XFREE_CLEAR(ui->term_name);
xfree(ui);
}
@ -231,8 +184,13 @@ void nvim_ui_attach(uint64_t channel_id, Integer width, Integer height, Dictiona
data->ncalls_pos = NULL;
data->ncalls = 0;
data->ncells_pending = 0;
data->buf_wptr = data->buf;
data->temp_buf = NULL;
data->packer = (PackerBuffer) {
.startptr = NULL,
.ptr = NULL,
.endptr = NULL,
.packer_flush = ui_flush_callback,
.anydata = data,
};
data->wildmenu_active = false;
data->call_buf = (Array)ARRAY_DICT_INIT;
kv_ensure_space(data->call_buf, 16);
@ -561,72 +519,29 @@ void nvim_ui_term_event(uint64_t channel_id, String event, Object value, Error *
static void flush_event(UIData *data)
{
if (data->cur_event) {
mpack_w2(&data->ncalls_pos, data->ncalls);
mpack_w2(&data->ncalls_pos, 1 + data->ncalls);
data->cur_event = NULL;
}
if (!data->nevents_pos) {
assert(BUF_POS(data) == 0);
char **buf = &data->buf_wptr;
// [2, "redraw", [...]]
mpack_array(buf, 3);
mpack_uint(buf, 2);
mpack_str(buf, S_LEN("redraw"));
data->nevents_pos = mpack_array_dyn16(buf);
data->ncalls_pos = NULL;
data->ncalls = 0;
}
}
static inline int write_cb(void *vdata, const char *buf, size_t len)
static void ui_alloc_buf(UIData *data)
{
UIData *data = (UIData *)vdata;
if (!buf) {
return 0;
}
data->pack_totlen += len;
if (!data->temp_buf && UI_BUF_SIZE - BUF_POS(data) < len) {
return 0;
}
memcpy(data->buf_wptr, buf, len);
data->buf_wptr += len;
return 0;
data->packer.startptr = alloc_block();
data->packer.ptr = data->packer.startptr;
data->packer.endptr = data->packer.startptr + UI_BUF_SIZE;
}
static inline int size_cb(void *vdata, const char *buf, size_t len)
{
UIData *data = (UIData *)vdata;
if (!buf) {
return 0;
}
data->pack_totlen += len;
return 0;
}
static void prepare_call(UI *ui, const char *name, size_t size_needed)
static void prepare_call(UI *ui, const char *name)
{
UIData *data = ui->data;
size_t name_len = strlen(name);
const size_t overhead = name_len + 20;
bool oversized_message = size_needed + overhead > UI_BUF_SIZE;
if (oversized_message || BUF_POS(data) > UI_BUF_SIZE - size_needed - overhead) {
remote_ui_flush_buf(ui);
if (data->packer.startptr && BUF_POS(data) > UI_BUF_SIZE - EVENT_BUF_SIZE) {
ui_flush_buf(data);
}
if (oversized_message) {
// TODO(bfredl): manually testable by setting UI_BUF_SIZE to 1024 (mode_info_set)
data->temp_buf = xmalloc(20 + name_len + size_needed);
data->buf_wptr = data->temp_buf;
char **buf = &data->buf_wptr;
mpack_array(buf, 3);
mpack_uint(buf, 2);
mpack_str(buf, S_LEN("redraw"));
mpack_array(buf, 1);
mpack_array(buf, 2);
mpack_str(buf, name, name_len);
return;
if (data->packer.startptr == NULL) {
ui_alloc_buf(data);
}
// To optimize data transfer (especially for "grid_line"), we bundle adjacent
@ -634,26 +549,23 @@ static void prepare_call(UI *ui, const char *name, size_t size_needed)
// method call is different from "name"
if (!data->cur_event || !strequal(data->cur_event, name)) {
char **buf = &data->packer.ptr;
if (!data->nevents_pos) {
// [2, "redraw", [...]]
mpack_array(buf, 3);
mpack_uint(buf, 2);
mpack_str_small(buf, S_LEN("redraw"));
data->nevents_pos = mpack_array_dyn16(buf);
assert(data->cur_event == NULL);
}
flush_event(data);
data->cur_event = name;
char **buf = &data->buf_wptr;
data->ncalls_pos = mpack_array_dyn16(buf);
mpack_str(buf, name, strlen(name));
mpack_str_small(buf, name, strlen(name));
data->nevents++;
data->ncalls = 1;
return;
}
}
static void send_oversized_message(UIData *data)
{
if (data->temp_buf) {
size_t size = (size_t)(data->buf_wptr - data->temp_buf);
WBuffer *buf = wstream_new_buffer(data->temp_buf, size, 1, xfree);
rpc_write_raw(data->channel_id, buf);
data->temp_buf = NULL;
data->buf_wptr = data->buf;
data->nevents_pos = NULL;
} else {
data->ncalls++;
}
}
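
For orientation, a hedged sketch (buffer size and call count are illustrative) of the envelope that flush_event and prepare_call build, so several grid_line argument lists ride inside one ["redraw", ...] notification:

char batch[1024];
char *wptr = batch;
char **buf = &wptr;
mpack_array(buf, 3);                         // [2, "redraw", [...]]
mpack_uint(buf, 2);                          // 2 = msgpack-rpc notification
mpack_str_small(buf, S_LEN("redraw"));
char *nevents_pos = mpack_array_dyn16(buf);  // number of distinct events, patched later
char *ncalls_pos = mpack_array_dyn16(buf);   // ["grid_line", args, args, ...]
mpack_str_small(buf, S_LEN("grid_line"));
// ...one argument array per bundled grid_line call is packed here...
mpack_w2(&ncalls_pos, 1 + 2);                // backpatch: event name plus two bundled arglists
mpack_w2(&nevents_pos, 1);                   // backpatch: one distinct event in this batch
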
@ -661,23 +573,15 @@ static void send_oversized_message(UIData *data)
static void push_call(UI *ui, const char *name, Array args)
{
UIData *data = ui->data;
prepare_call(ui, name);
mpack_object_array(args, &data->packer);
}
msgpack_packer pac;
data->pack_totlen = 0;
// First determine the needed size
msgpack_packer_init(&pac, data, size_cb);
msgpack_rpc_from_array(args, &pac);
// Then send the actual message
prepare_call(ui, name, data->pack_totlen);
msgpack_packer_init(&pac, data, write_cb);
msgpack_rpc_from_array(args, &pac);
// Oversized messages need to be sent immediately
if (data->temp_buf) {
send_oversized_message(data);
}
data->ncalls++;
static void ui_flush_callback(PackerBuffer *packer)
{
UIData *data = packer->anydata;
ui_flush_buf(data);
ui_alloc_buf(data);
}
void remote_ui_grid_clear(UI *ui, Integer grid)
@ -869,12 +773,15 @@ void remote_ui_raw_line(UI *ui, Integer grid, Integer row, Integer startcol, Int
Integer clearcol, Integer clearattr, LineFlags flags, const schar_T *chunk,
const sattr_T *attrs)
{
// If MAX_SCHAR_SIZE is made larger, we need to refactor implementation below
// to not only use FIXSTR (only up to 0x20 bytes)
STATIC_ASSERT(MAX_SCHAR_SIZE - 1 < 0x20, "SCHAR doesn't fit in fixstr");
UIData *data = ui->data;
if (ui->ui_ext[kUILinegrid]) {
prepare_call(ui, "grid_line", EVENT_BUF_SIZE);
data->ncalls++;
prepare_call(ui, "grid_line");
char **buf = &data->buf_wptr;
char **buf = &data->packer.ptr;
mpack_array(buf, 5);
mpack_uint(buf, (uint32_t)grid);
mpack_uint(buf, (uint32_t)row);
@ -898,10 +805,9 @@ void remote_ui_raw_line(UI *ui, Integer grid, Integer row, Integer startcol, Int
// We only ever set the wrap field on the final "grid_line" event for the line.
mpack_bool(buf, false);
remote_ui_flush_buf(ui);
ui_flush_buf(data);
prepare_call(ui, "grid_line", EVENT_BUF_SIZE);
data->ncalls++;
prepare_call(ui, "grid_line");
mpack_array(buf, 5);
mpack_uint(buf, (uint32_t)grid);
mpack_uint(buf, (uint32_t)row);
@ -934,7 +840,7 @@ void remote_ui_raw_line(UI *ui, Integer grid, Integer row, Integer startcol, Int
nelem++;
data->ncells_pending += 1;
mpack_array(buf, 3);
mpack_str(buf, S_LEN(" "));
mpack_str_small(buf, S_LEN(" "));
mpack_uint(buf, (uint32_t)clearattr);
mpack_uint(buf, (uint32_t)(clearcol - endcol));
}
@ -943,7 +849,7 @@ void remote_ui_raw_line(UI *ui, Integer grid, Integer row, Integer startcol, Int
if (data->ncells_pending > 500) {
// pass off cells to UI to let it start processing them
remote_ui_flush_buf(ui);
ui_flush_buf(data);
}
} else {
for (int i = 0; i < endcol - startcol; i++) {
@ -977,28 +883,27 @@ void remote_ui_raw_line(UI *ui, Integer grid, Integer row, Integer startcol, Int
///
/// This might happen multiple times before the actual ui_flush, if the
/// total redraw size is large!
void remote_ui_flush_buf(UI *ui)
static void ui_flush_buf(UIData *data)
{
UIData *data = ui->data;
if (!data->nevents_pos) {
if (!data->packer.startptr || !BUF_POS(data)) {
return;
}
if (data->cur_event) {
flush_event(data);
flush_event(data);
if (data->nevents_pos != NULL) {
mpack_w2(&data->nevents_pos, data->nevents);
data->nevents = 0;
data->nevents_pos = NULL;
}
mpack_w2(&data->nevents_pos, data->nevents);
data->nevents = 0;
data->nevents_pos = NULL;
// TODO(bfredl): elide copy by a length one free-list like the arena
size_t size = BUF_POS(data);
WBuffer *buf = wstream_new_buffer(xmemdup(data->buf, size), size, 1, xfree);
WBuffer *buf = wstream_new_buffer(data->packer.startptr, BUF_POS(data), 1, free_block);
rpc_write_raw(data->channel_id, buf);
data->buf_wptr = data->buf;
// we have sent events to the client, but possibly not yet the final "flush"
// event.
data->flushed_events = true;
data->packer.startptr = NULL;
data->packer.ptr = NULL;
// we have sent events to the client, but possibly not yet the final "flush" event.
data->flushed_events = true;
data->ncells_pending = 0;
}
@ -1014,7 +919,7 @@ void remote_ui_flush(UI *ui)
remote_ui_cursor_goto(ui, data->cursor_row, data->cursor_col);
}
push_call(ui, "flush", (Array)ARRAY_DICT_INIT);
remote_ui_flush_buf(ui);
ui_flush_buf(data);
data->flushed_events = false;
}
}


@ -307,7 +307,6 @@ output:write([[
#include "nvim/globals.h"
#include "nvim/log.h"
#include "nvim/map_defs.h"
#include "nvim/msgpack_rpc/helpers.h"
#include "nvim/api/autocmd.h"
#include "nvim/api/buffer.h"


@ -1381,9 +1381,11 @@ void nlua_push_keydict(lua_State *L, void *value, KeySetLink *table)
lua_pushstring(L, field->str);
if (field->type == kObjectTypeNil) {
nlua_push_Object(L, (Object *)mem, false);
} else if (field->type == kObjectTypeInteger || field->type == kObjectTypeBuffer
|| field->type == kObjectTypeWindow || field->type == kObjectTypeTabpage) {
} else if (field->type == kObjectTypeInteger) {
lua_pushinteger(L, *(Integer *)mem);
} else if (field->type == kObjectTypeBuffer || field->type == kObjectTypeWindow
|| field->type == kObjectTypeTabpage) {
lua_pushinteger(L, *(handle_T *)mem);
} else if (field->type == kObjectTypeFloat) {
lua_pushnumber(L, *(Float *)mem);
} else if (field->type == kObjectTypeBoolean) {


@ -70,7 +70,6 @@
#include "nvim/mouse.h"
#include "nvim/move.h"
#include "nvim/msgpack_rpc/channel.h"
#include "nvim/msgpack_rpc/helpers.h"
#include "nvim/msgpack_rpc/server.h"
#include "nvim/normal.h"
#include "nvim/ops.h"
@ -152,11 +151,9 @@ void event_init(void)
loop_init(&main_loop, NULL);
resize_events = multiqueue_new_child(main_loop.events);
// early msgpack-rpc initialization
msgpack_rpc_helpers_init();
input_init();
signal_init();
// finish mspgack-rpc initialization
// msgpack-rpc initialization
channel_init();
terminal_init();
ui_init();


@ -554,7 +554,6 @@ void time_to_bytes(time_t time_, uint8_t buf[8])
}
}
#define ARENA_BLOCK_SIZE 4096
#define REUSE_MAX 4
static struct consumed_blk *arena_reuse_blk;
@ -583,17 +582,26 @@ ArenaMem arena_finish(Arena *arena)
return res;
}
void alloc_block(Arena *arena)
/// allocate a block of ARENA_BLOCK_SIZE
///
/// free it with free_block
void *alloc_block(void)
{
struct consumed_blk *prev_blk = (struct consumed_blk *)arena->cur_blk;
if (arena_reuse_blk_count > 0) {
arena->cur_blk = (char *)arena_reuse_blk;
void *retval = (char *)arena_reuse_blk;
arena_reuse_blk = arena_reuse_blk->prev;
arena_reuse_blk_count--;
return retval;
} else {
arena_alloc_count++;
arena->cur_blk = xmalloc(ARENA_BLOCK_SIZE);
return xmalloc(ARENA_BLOCK_SIZE);
}
}
void arena_alloc_block(Arena *arena)
{
struct consumed_blk *prev_blk = (struct consumed_blk *)arena->cur_blk;
arena->cur_blk = alloc_block();
arena->pos = 0;
arena->size = ARENA_BLOCK_SIZE;
struct consumed_blk *blk = arena_alloc(arena, sizeof(struct consumed_blk), true);
@ -615,7 +623,7 @@ void *arena_alloc(Arena *arena, size_t size, bool align)
return xmalloc(size);
}
if (!arena->cur_blk) {
alloc_block(arena);
arena_alloc_block(arena);
}
size_t alloc_pos = align ? arena_align_offset(arena->pos) : arena->pos;
if (alloc_pos + size > arena->size) {
@ -637,7 +645,7 @@ void *arena_alloc(Arena *arena, size_t size, bool align)
cur_blk->prev = fix_blk;
return alloc + aligned_hdr_size;
} else {
alloc_block(arena); // resets arena->pos
arena_alloc_block(arena); // resets arena->pos
alloc_pos = align ? arena_align_offset(arena->pos) : arena->pos;
}
}
@ -647,17 +655,27 @@ void *arena_alloc(Arena *arena, size_t size, bool align)
return mem;
}
void free_block(void *block)
{
if (arena_reuse_blk_count < REUSE_MAX) {
struct consumed_blk *reuse_blk = block;
reuse_blk->prev = arena_reuse_blk;
arena_reuse_blk = reuse_blk;
arena_reuse_blk_count++;
} else {
xfree(block);
}
}
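
A small usage sketch, assuming nvim/memory.h (and string.h) are included: blocks from alloc_block() are always ARENA_BLOCK_SIZE bytes and go back through free_block(), which recycles up to REUSE_MAX of them:

void *block = alloc_block();   // always ARENA_BLOCK_SIZE (4096) bytes
memcpy(block, "redraw", 6);    // e.g. use it as a scratch or send buffer
free_block(block);             // recycled into the reuse pool, or xfree'd past REUSE_MAX

This is how the UI and RPC packers in this change obtain their send buffers: the filled block is handed to wstream_new_buffer with free_block as the destructor.
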
void arena_mem_free(ArenaMem mem)
{
struct consumed_blk *b = mem;
// peel off the first block, as it is guaranteed to be ARENA_BLOCK_SIZE,
// not a custom fix_blk
if (arena_reuse_blk_count < REUSE_MAX && b != NULL) {
if (b != NULL) {
struct consumed_blk *reuse_blk = b;
b = b->prev;
reuse_blk->prev = arena_reuse_blk;
arena_reuse_blk = reuse_blk;
arena_reuse_blk_count++;
free_block(reuse_blk);
}
while (b) {
@ -695,7 +713,6 @@ char *arena_memdupz(Arena *arena, const char *buf, size_t size)
# include "nvim/grid.h"
# include "nvim/mark.h"
# include "nvim/msgpack_rpc/channel.h"
# include "nvim/msgpack_rpc/helpers.h"
# include "nvim/ops.h"
# include "nvim/option.h"
# include "nvim/os/os.h"
@ -862,7 +879,6 @@ void free_all_mem(void)
ui_comp_free_all_mem();
nlua_free_all_mem();
rpc_free_all_mem();
msgpack_rpc_helpers_free_all_mem();
// should be last, in case earlier free functions deallocates arenas
arena_free_reuse_blks();


@ -45,6 +45,8 @@ EXTERN size_t arena_alloc_count INIT( = 0);
((v).capacity = (s), \
(v).items = (void *)arena_alloc(a, sizeof((v).items[0]) * (v).capacity, true))
#define ARENA_BLOCK_SIZE 4096
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "memory.h.generated.h"
#endif


@ -1,7 +1,6 @@
#include <assert.h>
#include <inttypes.h>
#include <msgpack/object.h>
#include <msgpack/pack.h>
#include <msgpack/sbuffer.h>
#include <msgpack/unpack.h>
#include <stdbool.h>
@ -29,7 +28,7 @@
#include "nvim/message.h"
#include "nvim/msgpack_rpc/channel.h"
#include "nvim/msgpack_rpc/channel_defs.h"
#include "nvim/msgpack_rpc/helpers.h"
#include "nvim/msgpack_rpc/packer.h"
#include "nvim/msgpack_rpc/unpacker.h"
#include "nvim/os/input.h"
#include "nvim/rbuffer.h"
@ -44,73 +43,31 @@
# define NOT "[notify] "
# define ERR "[error] "
// Cannot define array with negative offsets, so this one is needed to be added
// to MSGPACK_UNPACK_\* values.
# define MUR_OFF 2
# define SEND "->"
# define RECV "<-"
static const char *const msgpack_error_messages[] = {
[MSGPACK_UNPACK_EXTRA_BYTES + MUR_OFF] = "extra bytes found",
[MSGPACK_UNPACK_CONTINUE + MUR_OFF] = "incomplete string",
[MSGPACK_UNPACK_PARSE_ERROR + MUR_OFF] = "parse error",
[MSGPACK_UNPACK_NOMEM_ERROR + MUR_OFF] = "not enough memory",
};
static void log_close(FILE *f)
static void log_request(char *dir, uint64_t channel_id, uint32_t req_id, const char *name)
{
fputc('\n', f);
fflush(f);
fclose(f);
log_unlock();
DLOGN("RPC %s %" PRIu64 ": %s id=%u: %s\n", dir, channel_id, REQ, req_id, name);
}
static void log_server_msg(uint64_t channel_id, msgpack_sbuffer *packed)
static void log_response(char *dir, uint64_t channel_id, char *kind, uint32_t req_id)
{
msgpack_unpacked unpacked;
msgpack_unpacked_init(&unpacked);
DLOGN("RPC ->ch %" PRIu64 ": ", channel_id);
const msgpack_unpack_return result =
msgpack_unpack_next(&unpacked, packed->data, packed->size, NULL);
switch (result) {
case MSGPACK_UNPACK_SUCCESS: {
uint64_t type = unpacked.data.via.array.ptr[0].via.u64;
log_lock();
FILE *f = open_log_file();
fprintf(f, type ? (type == 1 ? RES : NOT) : REQ);
msgpack_object_print(f, unpacked.data);
log_close(f);
msgpack_unpacked_destroy(&unpacked);
break;
}
case MSGPACK_UNPACK_EXTRA_BYTES:
case MSGPACK_UNPACK_CONTINUE:
case MSGPACK_UNPACK_PARSE_ERROR:
case MSGPACK_UNPACK_NOMEM_ERROR: {
log_lock();
FILE *f = open_log_file();
fprintf(f, ERR);
fprintf(f, "%s", msgpack_error_messages[result + MUR_OFF]);
log_close(f);
break;
}
}
DLOGN("RPC %s %" PRIu64 ": %s id=%u\n", dir, channel_id, kind, req_id);
}
static void log_client_msg(uint64_t channel_id, bool is_request, const char *name)
static void log_notify(char *dir, uint64_t channel_id, const char *name)
{
DLOGN("RPC <-ch %" PRIu64 ": ", channel_id);
log_lock();
FILE *f = open_log_file();
fprintf(f, "%s: %s", is_request ? REQ : RES, name);
log_close(f);
DLOGN("RPC %s %" PRIu64 ": %s %s\n", dir, channel_id, NOT, name);
}
#else
# define log_client_msg(...)
# define log_server_msg(...)
# define log_request(...)
# define log_response(...)
# define log_notify(...)
#endif
static Set(cstr_t) event_strings = SET_INIT;
static msgpack_sbuffer out_buffer;
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "msgpack_rpc/channel.c.generated.h"
@ -119,7 +76,6 @@ static msgpack_sbuffer out_buffer;
void rpc_init(void)
{
ch_before_blocking_events = multiqueue_new_child(main_loop.events);
msgpack_sbuffer_init(&out_buffer);
}
void rpc_start(Channel *channel)
@ -169,8 +125,9 @@ bool rpc_send_event(uint64_t id, const char *name, Array args)
return false;
}
log_notify(SEND, channel ? channel->id : 0, name);
if (channel) {
send_event(channel, name, args);
serialize_request(&channel, 1, 0, name, args);
} else {
broadcast_event(name, args);
}
@ -199,7 +156,9 @@ Object rpc_send_call(uint64_t id, const char *method_name, Array args, ArenaMem
RpcState *rpc = &channel->rpc;
uint32_t request_id = rpc->next_request_id++;
// Send the msgpack-rpc request
send_request(channel, request_id, method_name, args);
serialize_request(&channel, 1, request_id, method_name, args);
log_request(SEND, channel->id, request_id, method_name);
// Push the frame
ChannelCallFrame frame = { request_id, false, false, NIL, NULL };
@ -361,8 +320,13 @@ static void parse_msgpack(Channel *channel)
frame->result = p->result;
}
frame->result_mem = arena_finish(&p->arena);
log_response(RECV, channel->id, frame->errored ? ERR : RES, p->request_id);
} else {
log_client_msg(channel->id, p->type == kMessageTypeRequest, p->handler.name);
if (p->type == kMessageTypeNotification) {
log_notify(RECV, channel->id, p->handler.name);
} else {
log_request(RECV, channel->id, p->request_id, p->handler.name);
}
Object res = p->result;
if (p->result.type != kObjectTypeArray) {
@ -442,15 +406,7 @@ static void request_event(void **argv)
Object result = handler.fn(channel->id, e->args, &e->used_mem, &error);
if (e->type == kMessageTypeRequest || ERROR_SET(&error)) {
// Send the response.
msgpack_packer response;
msgpack_packer_init(&response, &out_buffer, msgpack_sbuffer_write);
channel_write(channel, serialize_response(channel->id,
e->handler,
e->type,
e->request_id,
&error,
&result,
&out_buffer));
serialize_response(channel, e->handler, e->type, e->request_id, &error, &result);
}
if (handler.ret_alloc) {
api_free_object(result);
@ -533,41 +489,14 @@ static void send_error(Channel *chan, MsgpackRpcRequestHandler handler, MessageT
{
Error e = ERROR_INIT;
api_set_error(&e, kErrorTypeException, "%s", err);
channel_write(chan, serialize_response(chan->id,
handler,
type,
id,
&e,
&NIL,
&out_buffer));
serialize_response(chan, handler, type, id, &e, &NIL);
api_clear_error(&e);
}
static void send_request(Channel *channel, uint32_t id, const char *name, Array args)
{
const String method = cstr_as_string(name);
channel_write(channel, serialize_request(channel->id,
id,
method,
args,
&out_buffer,
1));
}
static void send_event(Channel *channel, const char *name, Array args)
{
const String method = cstr_as_string(name);
channel_write(channel, serialize_request(channel->id,
0,
method,
args,
&out_buffer,
1));
}
static void broadcast_event(const char *name, Array args)
{
kvec_t(Channel *) subscribed = KV_INITIAL_VALUE;
kvec_withinit_t(Channel *, 4) subscribed = KV_INITIAL_VALUE;
kvi_init(subscribed);
Channel *channel;
map_foreach_value(&channels, channel, {
@ -577,25 +506,11 @@ static void broadcast_event(const char *name, Array args)
}
});
if (!kv_size(subscribed)) {
goto end;
if (kv_size(subscribed)) {
serialize_request(subscribed.items, kv_size(subscribed), 0, name, args);
}
const String method = cstr_as_string(name);
WBuffer *buffer = serialize_request(0,
0,
method,
args,
&out_buffer,
kv_size(subscribed));
for (size_t i = 0; i < kv_size(subscribed); i++) {
Channel *c = kv_A(subscribed, i);
channel_write(c, buffer);
}
end:
kv_destroy(subscribed);
kvi_destroy(subscribed);
}
static void unsubscribe(Channel *channel, char *event)
@ -653,27 +568,28 @@ static void chan_close_with_error(Channel *channel, char *msg, int loglevel)
channel_close(channel->id, kChannelPartRpc, NULL);
}
static WBuffer *serialize_request(uint64_t channel_id, uint32_t request_id, const String method,
Array args, msgpack_sbuffer *sbuffer, size_t refcount)
static void serialize_request(Channel **chans, size_t nchans, uint32_t request_id,
const char *method, Array args)
{
msgpack_packer pac;
msgpack_packer_init(&pac, sbuffer, msgpack_sbuffer_write);
msgpack_rpc_serialize_request(request_id, method, args, &pac);
log_server_msg(channel_id, sbuffer);
WBuffer *rv = wstream_new_buffer(xmemdup(sbuffer->data, sbuffer->size),
sbuffer->size,
refcount,
xfree);
msgpack_sbuffer_clear(sbuffer);
return rv;
PackerBuffer packer;
packer_buffer_init_channels(chans, nchans, &packer);
mpack_array(&packer.ptr, request_id ? 4 : 3);
mpack_w(&packer.ptr, request_id ? 0 : 2);
if (request_id) {
mpack_uint(&packer.ptr, request_id);
}
mpack_str(cstr_as_string(method), &packer);
mpack_object_array(args, &packer);
packer_buffer_finish_channels(&packer);
}
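
A worked example (illustrative, not part of the diff) of the header bytes this produces for a notification (request_id == 0) named "redraw":

//   0x93                          fixarray(3)   -> [2, "redraw", args]
//   0x02                          fixint 2      (notification; a request writes 0 and then the id)
//   0xa6 'r' 'e' 'd' 'r' 'a' 'w'  fixstr(6)
// followed by the packed args array.
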
static WBuffer *serialize_response(uint64_t channel_id, MsgpackRpcRequestHandler handler,
MessageType type, uint32_t response_id, Error *err, Object *arg,
msgpack_sbuffer *sbuffer)
void serialize_response(Channel *channel, MsgpackRpcRequestHandler handler, MessageType type,
uint32_t response_id, Error *err, Object *arg)
{
msgpack_packer pac;
msgpack_packer_init(&pac, sbuffer, msgpack_sbuffer_write);
if (ERROR_SET(err) && type == kMessageTypeNotification) {
if (handler.fn == handle_nvim_paste) {
// TODO(bfredl): this is pretty much ad-hoc. maybe TUI and UI:s should be
@ -685,19 +601,65 @@ static WBuffer *serialize_response(uint64_t channel_id, MsgpackRpcRequestHandler
MAXSIZE_TEMP_ARRAY(args, 2);
ADD_C(args, INTEGER_OBJ(err->type));
ADD_C(args, CSTR_AS_OBJ(err->msg));
msgpack_rpc_serialize_request(0, cstr_as_string("nvim_error_event"),
args, &pac);
serialize_request(&channel, 1, 0, "nvim_error_event", args);
}
return;
}
PackerBuffer packer;
packer_buffer_init_channels(&channel, 1, &packer);
mpack_array(&packer.ptr, 4);
mpack_w(&packer.ptr, 1);
mpack_uint(&packer.ptr, response_id);
if (ERROR_SET(err)) {
// error represented by a [type, message] array
mpack_array(&packer.ptr, 2);
mpack_integer(&packer.ptr, err->type);
mpack_str(cstr_as_string(err->msg), &packer);
// Nil result
mpack_nil(&packer.ptr);
} else {
// Nil error
mpack_nil(&packer.ptr);
// Return value
mpack_object(arg, &packer);
}
packer_buffer_finish_channels(&packer);
log_response(SEND, channel->id, ERROR_SET(err) ? ERR : RES, response_id);
}
static void packer_buffer_init_channels(Channel **chans, size_t nchans, PackerBuffer *packer)
{
packer->startptr = alloc_block();
packer->ptr = packer->startptr;
packer->endptr = packer->startptr + ARENA_BLOCK_SIZE;
packer->packer_flush = channel_flush_callback;
packer->anydata = chans;
packer->anylen = nchans;
}
static void packer_buffer_finish_channels(PackerBuffer *packer)
{
size_t len = (size_t)(packer->ptr - packer->startptr);
if (len > 0) {
WBuffer *buf = wstream_new_buffer(packer->startptr, len, packer->anylen, free_block);
Channel **chans = packer->anydata;
for (size_t i = 0; i < packer->anylen; i++) {
channel_write(chans[i], buf);
}
} else {
msgpack_rpc_serialize_response(response_id, err, arg, &pac);
free_block(packer->startptr);
}
log_server_msg(channel_id, sbuffer);
WBuffer *rv = wstream_new_buffer(xmemdup(sbuffer->data, sbuffer->size),
sbuffer->size,
1, // responses only go though 1 channel
xfree);
msgpack_sbuffer_clear(sbuffer);
return rv;
}
static void channel_flush_callback(PackerBuffer *packer)
{
packer_buffer_finish_channels(packer);
packer_buffer_init_channels(packer->anydata, packer->anylen, packer);
}
void rpc_set_client_info(uint64_t id, Dictionary info)
@ -762,7 +724,6 @@ void rpc_free_all_mem(void)
});
set_destroy(cstr_t, &event_strings);
msgpack_sbuffer_destroy(&out_buffer);
multiqueue_free(ch_before_blocking_events);
}
#endif


@ -1,245 +0,0 @@
#include <msgpack/object.h>
#include <msgpack/sbuffer.h>
#include <msgpack/unpack.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include "klib/kvec.h"
#include "msgpack/pack.h"
#include "nvim/api/private/helpers.h"
#include "nvim/assert_defs.h"
#include "nvim/lua/executor.h"
#include "nvim/memory.h"
#include "nvim/msgpack_rpc/helpers.h"
#include "nvim/types_defs.h"
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "msgpack_rpc/helpers.c.generated.h"
#endif
static msgpack_sbuffer sbuffer;
void msgpack_rpc_helpers_init(void)
{
msgpack_sbuffer_init(&sbuffer);
}
#ifdef EXITFREE
void msgpack_rpc_helpers_free_all_mem(void)
{
msgpack_sbuffer_destroy(&sbuffer);
}
#endif
// uncrustify:off
void msgpack_rpc_from_boolean(Boolean result, msgpack_packer *res)
FUNC_ATTR_NONNULL_ARG(2)
{
if (result) {
msgpack_pack_true(res);
} else {
msgpack_pack_false(res);
}
}
void msgpack_rpc_from_integer(Integer result, msgpack_packer *res)
FUNC_ATTR_NONNULL_ARG(2)
{
msgpack_pack_int64(res, result);
}
void msgpack_rpc_from_float(Float result, msgpack_packer *res)
FUNC_ATTR_NONNULL_ARG(2)
{
msgpack_pack_double(res, result);
}
void msgpack_rpc_from_string(const String result, msgpack_packer *res)
FUNC_ATTR_NONNULL_ARG(2)
{
msgpack_pack_str(res, result.size);
if (result.size > 0) {
msgpack_pack_str_body(res, result.data, result.size);
}
}
static void msgpack_rpc_from_handle(ObjectType type, Integer o, msgpack_packer *res)
FUNC_ATTR_NONNULL_ARG(3)
{
msgpack_packer pac;
msgpack_packer_init(&pac, &sbuffer, msgpack_sbuffer_write);
msgpack_pack_int64(&pac, (handle_T)o);
msgpack_pack_ext(res, sbuffer.size, (int8_t)(type - EXT_OBJECT_TYPE_SHIFT));
msgpack_pack_ext_body(res, sbuffer.data, sbuffer.size);
msgpack_sbuffer_clear(&sbuffer);
}
typedef struct {
Object *aobj;
bool container;
size_t idx;
} APIToMPObjectStackItem;
/// Convert type used by Nvim API to msgpack type.
///
/// consumes (frees) any luaref inside `result`, even though they are not used
/// (just represented as NIL)
///
/// @param[in] result Object to convert.
/// @param[out] res Structure that defines where conversion results are saved.
///
/// @return true in case of success, false otherwise.
void msgpack_rpc_from_object(Object *result, msgpack_packer *const res)
FUNC_ATTR_NONNULL_ARG(2)
{
kvec_withinit_t(APIToMPObjectStackItem, 2) stack = KV_INITIAL_VALUE;
kvi_init(stack);
kvi_push(stack, ((APIToMPObjectStackItem) { result, false, 0 }));
while (kv_size(stack)) {
APIToMPObjectStackItem cur = kv_last(stack);
STATIC_ASSERT(kObjectTypeWindow == kObjectTypeBuffer + 1
&& kObjectTypeTabpage == kObjectTypeWindow + 1,
"Buffer, window and tabpage enum items are in order");
switch (cur.aobj->type) {
case kObjectTypeLuaRef:
// TODO(bfredl): could also be an error. Though kObjectTypeLuaRef
// should only appear when the caller has opted in to handle references,
// see nlua_pop_Object.
api_free_luaref(cur.aobj->data.luaref);
cur.aobj->data.luaref = LUA_NOREF;
FALLTHROUGH;
case kObjectTypeNil:
msgpack_pack_nil(res);
break;
case kObjectTypeBoolean:
msgpack_rpc_from_boolean(cur.aobj->data.boolean, res);
break;
case kObjectTypeInteger:
msgpack_rpc_from_integer(cur.aobj->data.integer, res);
break;
case kObjectTypeFloat:
msgpack_rpc_from_float(cur.aobj->data.floating, res);
break;
case kObjectTypeString:
msgpack_rpc_from_string(cur.aobj->data.string, res);
break;
case kObjectTypeBuffer:
case kObjectTypeWindow:
case kObjectTypeTabpage:
msgpack_rpc_from_handle(cur.aobj->type, cur.aobj->data.integer, res);
break;
case kObjectTypeArray: {
const size_t size = cur.aobj->data.array.size;
if (cur.container) {
if (cur.idx >= size) {
(void)kv_pop(stack);
} else {
const size_t idx = cur.idx;
cur.idx++;
kv_last(stack) = cur;
kvi_push(stack, ((APIToMPObjectStackItem) {
.aobj = &cur.aobj->data.array.items[idx],
.container = false,
}));
}
} else {
msgpack_pack_array(res, size);
cur.container = true;
kv_last(stack) = cur;
}
break;
}
case kObjectTypeDictionary: {
const size_t size = cur.aobj->data.dictionary.size;
if (cur.container) {
if (cur.idx >= size) {
(void)kv_pop(stack);
} else {
const size_t idx = cur.idx;
cur.idx++;
kv_last(stack) = cur;
msgpack_rpc_from_string(cur.aobj->data.dictionary.items[idx].key, res);
kvi_push(stack, ((APIToMPObjectStackItem) {
.aobj = &cur.aobj->data.dictionary.items[idx].value,
.container = false,
}));
}
} else {
msgpack_pack_map(res, size);
cur.container = true;
kv_last(stack) = cur;
}
break;
}
}
if (!cur.container) {
(void)kv_pop(stack);
}
}
kvi_destroy(stack);
}
// uncrustify:on
void msgpack_rpc_from_array(Array result, msgpack_packer *res)
FUNC_ATTR_NONNULL_ARG(2)
{
msgpack_pack_array(res, result.size);
for (size_t i = 0; i < result.size; i++) {
msgpack_rpc_from_object(&result.items[i], res);
}
}
void msgpack_rpc_from_dictionary(Dictionary result, msgpack_packer *res)
FUNC_ATTR_NONNULL_ARG(2)
{
msgpack_pack_map(res, result.size);
for (size_t i = 0; i < result.size; i++) {
msgpack_rpc_from_string(result.items[i].key, res);
msgpack_rpc_from_object(&result.items[i].value, res);
}
}
/// Serializes a msgpack-rpc request or notification(id == 0)
void msgpack_rpc_serialize_request(uint32_t request_id, const String method, Array args,
msgpack_packer *pac)
FUNC_ATTR_NONNULL_ARG(4)
{
msgpack_pack_array(pac, request_id ? 4 : 3);
msgpack_pack_int(pac, request_id ? 0 : 2);
if (request_id) {
msgpack_pack_uint32(pac, request_id);
}
msgpack_rpc_from_string(method, pac);
msgpack_rpc_from_array(args, pac);
}
/// Serializes a msgpack-rpc response
void msgpack_rpc_serialize_response(uint32_t response_id, Error *err, Object *arg,
msgpack_packer *pac)
FUNC_ATTR_NONNULL_ALL
{
msgpack_pack_array(pac, 4);
msgpack_pack_int(pac, 1);
msgpack_pack_uint32(pac, response_id);
if (ERROR_SET(err)) {
// error represented by a [type, message] array
msgpack_pack_array(pac, 2);
msgpack_rpc_from_integer(err->type, pac);
msgpack_rpc_from_string(cstr_as_string(err->msg), pac);
// Nil result
msgpack_pack_nil(pac);
} else {
// Nil error
msgpack_pack_nil(pac);
// Return value
msgpack_rpc_from_object(arg, pac);
}
}


@ -1,17 +0,0 @@
#pragma once
#include <msgpack.h> // IWYU pragma: keep
#include "nvim/api/private/defs.h"
/// Value by which objects represented as EXT type are shifted
///
/// Subtracted when packing, added when unpacking. Used to allow moving
/// buffer/window/tabpage block inside ObjectType enum. This block yet cannot be
/// split or reordered.
#define EXT_OBJECT_TYPE_SHIFT kObjectTypeBuffer
#define EXT_OBJECT_TYPE_MAX (kObjectTypeTabpage - EXT_OBJECT_TYPE_SHIFT)
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "msgpack_rpc/helpers.h.generated.h"
#endif


@ -0,0 +1,235 @@
#include <assert.h>
#include "nvim/api/private/defs.h"
#include "nvim/lua/executor.h"
#include "nvim/msgpack_rpc/packer.h"
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "msgpack_rpc/packer.c.generated.h"
#endif
static void check_buffer(PackerBuffer *packer)
{
ptrdiff_t remaining = packer->endptr - packer->ptr;
if (remaining < MPACK_ITEM_SIZE) {
packer->packer_flush(packer);
}
}
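
A minimal sketch of the invariant check_buffer provides for callers (hypothetical helper, assuming it sits in this file next to check_buffer): every fixed-size item is at most MPACK_ITEM_SIZE bytes, so one check before emitting it can never overflow, while oversized strings chunk themselves in mpack_str below.

static void pack_one_uint(PackerBuffer *packer, uint32_t v)
{
  check_buffer(packer);         // flushes if fewer than MPACK_ITEM_SIZE bytes remain
  mpack_uint(&packer->ptr, v);  // writes at most 5 bytes
}
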
static void mpack_w8(char **b, const char *data)
{
#ifdef ORDER_BIG_ENDIAN
memcpy(*b, data, 8);
*b += 8;
#else
for (int i = 7; i >= 0; i--) {
*(*b)++ = data[i];
}
#endif
}
void mpack_integer(char **ptr, Integer i)
{
if (i >= 0) {
if (i > 0xfffffff) {
mpack_w(ptr, 0xcf);
mpack_w8(ptr, (char *)&i);
} else {
mpack_uint(ptr, (uint32_t)i);
}
} else {
if (i < -0x80000000LL) {
mpack_w(ptr, 0xd3);
mpack_w8(ptr, (char *)&i);
} else if (i < -0x8000) {
mpack_w(ptr, 0xd2);
mpack_w4(ptr, (uint32_t)i);
} else if (i < -0x80) {
mpack_w(ptr, 0xd1);
mpack_w2(ptr, (uint32_t)i);
} else if (i < -0x20) {
mpack_w(ptr, 0xd0);
mpack_w(ptr, (char)i);
} else {
mpack_w(ptr, (char)i);
}
}
}
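
A few worked encodings for the branches above (scratch buffer and values are illustrative; bytes shown in write order):

char scratch[MPACK_ITEM_SIZE];
char *p = scratch;
mpack_integer(&p, 5);     // -> 0x05              positive fixint
p = scratch;
mpack_integer(&p, 300);   // -> 0xcd 0x01 0x2c    uint16 via mpack_uint
p = scratch;
mpack_integer(&p, -3);    // -> 0xfd              negative fixint
p = scratch;
mpack_integer(&p, -200);  // -> 0xd1 0xff 0x38    int16
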
void mpack_float8(char **ptr, double i)
{
mpack_w(ptr, 0xcb);
mpack_w8(ptr, (char *)&i);
}
void mpack_str(String str, PackerBuffer *packer)
{
const size_t len = str.size;
if (len < 20) {
mpack_w(&packer->ptr, 0xa0 | len);
} else if (len < 0xff) {
mpack_w(&packer->ptr, 0xd9);
mpack_w(&packer->ptr, len);
} else if (len < 0xffff) {
mpack_w(&packer->ptr, 0xda);
mpack_w2(&packer->ptr, (uint32_t)len);
} else if (len < 0xffffffff) {
mpack_w(&packer->ptr, 0xdb);
mpack_w4(&packer->ptr, (uint32_t)len);
} else {
abort();
}
size_t pos = 0;
while (pos < len) {
ptrdiff_t remaining = packer->endptr - packer->ptr;
size_t to_copy = MIN(len - pos, (size_t)remaining);
memcpy(packer->ptr, str.data + pos, to_copy);
packer->ptr += to_copy;
pos += to_copy;
if (pos < len) {
packer->packer_flush(packer);
}
}
}
void mpack_handle(ObjectType type, handle_T handle, PackerBuffer *packer)
{
char exttype = (char)(type - EXT_OBJECT_TYPE_SHIFT);
if (-0x1f <= handle && handle <= 0x7f) {
mpack_w(&packer->ptr, 0xd4);
mpack_w(&packer->ptr, exttype);
mpack_w(&packer->ptr, (char)handle);
} else {
// we want to encode some small negative sentinel like -1. This is handled above
assert(handle >= 0);
// FAIL: we cannot use fixext 4/8 due to a design error
// (in theory fixext 2 for handle<=0xff but we don't gain much from it)
char buf[MPACK_ITEM_SIZE];
char *pos = buf;
mpack_uint(&pos, (uint32_t)handle);
ptrdiff_t packsize = pos - buf;
mpack_w(&packer->ptr, 0xc7);
mpack_w(&packer->ptr, packsize);
mpack_w(&packer->ptr, exttype);
// check_buffer(packer);
memcpy(packer->ptr, buf, (size_t)packsize);
packer->ptr += packsize;
}
}
void mpack_object(Object *obj, PackerBuffer *packer)
{
mpack_object_inner(obj, NULL, 0, packer);
}
void mpack_object_array(Array arr, PackerBuffer *packer)
{
mpack_array(&packer->ptr, (uint32_t)arr.size);
if (arr.size > 0) {
Object container = ARRAY_OBJ(arr);
mpack_object_inner(&arr.items[0], arr.size > 1 ? &container : NULL, 1, packer);
}
}
typedef struct {
Object *container;
size_t idx;
} ContainerStackItem;
void mpack_object_inner(Object *current, Object *container, size_t container_idx,
PackerBuffer *packer)
FUNC_ATTR_NONNULL_ARG(1, 4)
{
// The inner loop of this function packs "current" and then fetches the next
// value from "container". "stack" is only used for nested containers.
kvec_withinit_t(ContainerStackItem, 2) stack = KV_INITIAL_VALUE;
kvi_init(stack);
while (true) {
check_buffer(packer);
switch (current->type) {
case kObjectTypeLuaRef:
// TODO(bfredl): could also be an error. Though kObjectTypeLuaRef
// should only appear when the caller has opted in to handle references,
// see nlua_pop_Object.
api_free_luaref(current->data.luaref);
current->data.luaref = LUA_NOREF;
FALLTHROUGH;
case kObjectTypeNil:
mpack_nil(&packer->ptr);
break;
case kObjectTypeBoolean:
mpack_bool(&packer->ptr, current->data.boolean);
break;
case kObjectTypeInteger:
mpack_integer(&packer->ptr, current->data.integer);
break;
case kObjectTypeFloat:
mpack_float8(&packer->ptr, current->data.floating);
break;
case kObjectTypeString:
mpack_str(current->data.string, packer);
break;
case kObjectTypeBuffer:
case kObjectTypeWindow:
case kObjectTypeTabpage:
mpack_handle(current->type, (handle_T)current->data.integer, packer);
break;
case kObjectTypeDictionary:
case kObjectTypeArray: {}
size_t current_size;
if (current->type == kObjectTypeArray) {
current_size = current->data.array.size;
mpack_array(&packer->ptr, (uint32_t)current_size);
} else {
current_size = current->data.dictionary.size;
mpack_map(&packer->ptr, (uint32_t)current_size);
}
if (current_size > 0) {
if (current->type == kObjectTypeArray && current_size == 1) {
current = &current->data.array.items[0];
continue;
}
if (container) {
kvi_push(stack, ((ContainerStackItem) {
.container = container,
.idx = container_idx,
}));
}
container = current;
container_idx = 0;
}
break;
}
if (!container) {
if (kv_size(stack)) {
ContainerStackItem it = kv_pop(stack);
container = it.container;
container_idx = it.idx;
} else {
break;
}
}
if (container->type == kObjectTypeArray) {
Array arr = container->data.array;
current = &arr.items[container_idx++];
if (container_idx >= arr.size) {
container = NULL;
}
} else {
Dictionary dict = container->data.dictionary;
KeyValuePair *it = &dict.items[container_idx++];
check_buffer(packer);
mpack_str(it->key, packer);
current = &it->value;
if (container_idx >= dict.size) {
container = NULL;
}
}
}
kvi_destroy(stack);
}
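
A hypothetical call site, assuming a PackerBuffer already set up as described in packer_defs.h; nesting is handled iteratively by mpack_object_inner rather than by recursion:

MAXSIZE_TEMP_ARRAY(args, 2);        // stack-allocated Array helper used elsewhere in nvim
ADD_C(args, INTEGER_OBJ(42));
ADD_C(args, CSTR_AS_OBJ("hello"));
mpack_object_array(args, &packer);  // packer: a PackerBuffer prepared by the caller
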


@ -0,0 +1,76 @@
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include "nvim/api/private/defs.h"
#include "nvim/msgpack_rpc/packer_defs.h"
#define mpack_w(b, byte) *(*(b))++ = (char)(byte);
static inline void mpack_w2(char **b, uint32_t v)
{
*(*b)++ = (char)((v >> 8) & 0xff);
*(*b)++ = (char)(v & 0xff);
}
static inline void mpack_w4(char **b, uint32_t v)
{
*(*b)++ = (char)((v >> 24) & 0xff);
*(*b)++ = (char)((v >> 16) & 0xff);
*(*b)++ = (char)((v >> 8) & 0xff);
*(*b)++ = (char)(v & 0xff);
}
static inline void mpack_uint(char **buf, uint32_t val)
{
if (val > 0xffff) {
mpack_w(buf, 0xce);
mpack_w4(buf, val);
} else if (val > 0xff) {
mpack_w(buf, 0xcd);
mpack_w2(buf, val);
} else if (val > 0x7f) {
mpack_w(buf, 0xcc);
mpack_w(buf, val);
} else {
mpack_w(buf, val);
}
}
#define mpack_nil(buf) mpack_w(buf, 0xc0)
static inline void mpack_bool(char **buf, bool val)
{
mpack_w(buf, 0xc2 | (val ? 1 : 0));
}
static inline void mpack_array(char **buf, uint32_t len)
{
if (len < 0x10) {
mpack_w(buf, 0x90 | len);
} else if (len < 0x10000) {
mpack_w(buf, 0xdc);
mpack_w2(buf, len);
} else {
mpack_w(buf, 0xdd);
mpack_w4(buf, len);
}
}
static inline void mpack_map(char **buf, uint32_t len)
{
if (len < 0x10) {
mpack_w(buf, 0x80 | len);
} else if (len < 0x10000) {
mpack_w(buf, 0xde);
mpack_w2(buf, len);
} else {
mpack_w(buf, 0xdf);
mpack_w4(buf, len);
}
}
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "msgpack_rpc/packer.h.generated.h"
#endif


@ -0,0 +1,24 @@
#pragma once
#include <stddef.h>
#include <stdint.h>
// Max possible length: bytecode + 8 int/float bytes
// Ext objects are maximum 8=3+5 (nested uint32 payload)
#define MPACK_ITEM_SIZE 9
typedef struct packer_buffer_t PackerBuffer;
// Must ensure at least MPACK_ITEM_SIZE of space.
typedef void (*PackerBufferFlush)(PackerBuffer *self);
struct packer_buffer_t {
char *startptr;
char *ptr;
char *endptr;
// these are free to be used by packer_flush for any purpose, if wanted
void *anydata;
size_t anylen;
PackerBufferFlush packer_flush;
};
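
A minimal sketch of a flush implementation, assuming a hypothetical sink function; the only contract is that after the callback returns, at least MPACK_ITEM_SIZE bytes are free again:

static void my_flush(PackerBuffer *self)
{
  size_t len = (size_t)(self->ptr - self->startptr);
  sink_write(self->anydata, self->startptr, len);  // sink_write: hypothetical consumer
  self->ptr = self->startptr;                      // rewind so packing can continue
}

The real callbacks in this change (ui_flush_callback, channel_flush_callback) instead send the filled block as a WBuffer and allocate a fresh one.
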


@ -10,7 +10,6 @@
#include "nvim/macros_defs.h"
#include "nvim/memory.h"
#include "nvim/msgpack_rpc/channel_defs.h"
#include "nvim/msgpack_rpc/helpers.h"
#include "nvim/msgpack_rpc/unpacker.h"
#include "nvim/ui_client.h"


@ -41,7 +41,6 @@
#include "nvim/mbyte.h"
#include "nvim/memory.h"
#include "nvim/message.h"
#include "nvim/msgpack_rpc/helpers.h"
#include "nvim/normal_defs.h"
#include "nvim/ops.h"
#include "nvim/option.h"


@ -2173,7 +2173,7 @@ String arena_printf(Arena *arena, const char *fmt, ...)
char *buf = NULL;
if (arena) {
if (!arena->cur_blk) {
alloc_block(arena);
arena_alloc_block(arena);
}
// happy case, we can fit the printed string in the rest of the current


@ -5,6 +5,7 @@
#include <stdint.h>
#include "nvim/api/private/defs.h"
#include "nvim/msgpack_rpc/packer_defs.h"
/// Keep in sync with ui_ext_names[] in ui.h
typedef enum {
@ -34,20 +35,16 @@ typedef struct ui_t UI;
typedef struct {
uint64_t channel_id;
#define UI_BUF_SIZE 4096 ///< total buffer size for pending msgpack data.
#define UI_BUF_SIZE ARENA_BLOCK_SIZE ///< total buffer size for pending msgpack data.
/// guaranteed size available for each new event (so packing of simple events
/// and the header of grid_line will never fail)
#define EVENT_BUF_SIZE 256
char buf[UI_BUF_SIZE]; ///< buffer of packed but not yet sent msgpack data
char *buf_wptr; ///< write head of buffer
PackerBuffer packer;
const char *cur_event; ///< name of current event (might get multiple arglists)
Array call_buf; ///< buffer for constructing a single arg list (max 16 elements!)
// state for write_cb, while packing a single arglist to msgpack. This
// might fail due to buffer overflow.
size_t pack_totlen;
char *temp_buf;
// We start packing the two outermost msgpack arrays before knowing the total
// number of elements. Thus track the location where array size will need
// to be written in the msgpack buffer, once the specific array is finished.