refactor: Update build/install commands
- Updates README.md with docs
- Adds some lua dev conveniences

Signed-off-by: John McBride <[email protected]>
jpmcb committed Sep 2, 2023
1 parent 05006ee commit 5f25403
Showing 7 changed files with 206 additions and 50 deletions.
85 changes: 83 additions & 2 deletions README.md
@@ -1,5 +1,86 @@
# 🦙 nvim-llama

-Llama 2 interfaces for Neovim
+_[Llama 2](https://ai.meta.com/llama/) and [llama.cpp](https://github.com/ggerganov/llama.cpp/) interfaces for Neovim_

-🚧 _there's not alot here right now. Come back soon!_ 🚧
+🏗️ 👷 Warning! Under active development!! 👷 🚧

# Installation

Use your favorite package manager to install the plugin:

### Packer

```lua
use 'jpmcb/nvim-llama'
```

### lazy.nvim

```lua
{
    'jpmcb/nvim-llama'
}
```

### vim-plug

```vim
Plug 'jpmcb/nvim-llama'
```

# Setup & configuration

In your `init.vim`, set up the plugin:

```lua
require('nvim-llama').setup {}
```

You can provide the following optional configuration table to the `setup` function:

```lua
local defaults = {
    -- See plugin debugging logs
    debug = false,

    -- Build llama.cpp for GPU acceleration on Apple M chip devices.
    -- If you are using an Apple M1/M2 laptop, it is highly recommended to
    -- enable this since, depending on the model, it may drastically increase performance.
    build_metal = false,
}
```
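
For example, to enable the Metal build on an Apple silicon machine (a minimal sketch; any keys you omit keep their defaults):

```lua
require('nvim-llama').setup {
    debug = true,       -- print plugin debugging logs
    build_metal = true, -- build llama.cpp with LLAMA_METAL=1
}
```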

# Models

llama.cpp supports a wide range of models.

To start using one, you'll need to download an appropriately sized model that
is supported by llama.cpp.

The 13B GGUF CodeLlama model is a really good place to start:
https://huggingface.co/TheBloke/CodeLlama-13B-GGUF

To use a model, it must live in the `llama.cpp/models/` directory, which
is expected at `~/.local/share/nvim/llama.cpp/models`.

The following script can be useful for downloading a model to that directory:

```sh
LLAMA_CPP="~/.local/share/nvim/llama.cpp"
MODEL="codellama-13b.Q4_K_M.gguf"

pushd "${LLAMA_CPP}"
if [ ! -f models/${MODEL} ]; then
curl -L "https://huggingface.co/TheBloke/CodeLlama-13B-GGUF/resolve/main/${MODEL}" -o models/${MODEL}
fi
popd
```
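
The same logic ships in this commit as `get-model.sh`, so once `:LlamaInstall` has cloned llama.cpp you can also run the script directly:

```sh
bash ./get-model.sh
```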

In the future, this project may provide the capability to download models automatically.

# License

This project is dual-licensed under [MIT](./LICENSE.txt) (first-party plugin code)
and the [Llama 2 license](./LICENSE.llama.txt).
By using this plugin, you agree to both sets of terms and assert that you already have
[your own non-transferable license for Llama 2 from Meta AI](https://ai.meta.com/resources/models-and-libraries/llama-downloads/).
16 changes: 16 additions & 0 deletions get-model.sh
@@ -0,0 +1,16 @@
#!/bin/bash

# This is an example convenience script that demonstrates downloading the 13B
# GGUF CodeLlama model for llama.cpp from Hugging Face.
#
# It drops the model into the directory where nvim-llama and llama.cpp expect
# to find it.

LLAMA_CPP_CLONE="$HOME/.local/share/nvim/llama.cpp"
MODEL="codellama-13b.Q4_K_M.gguf"

pushd "${LLAMA_CPP_CLONE}"
if [ ! -f "models/${MODEL}" ]; then
    curl -L "https://huggingface.co/TheBloke/CodeLlama-13B-GGUF/resolve/main/${MODEL}" -o "models/${MODEL}"
fi
popd
5 changes: 5 additions & 0 deletions lua/.luarc.json
@@ -0,0 +1,5 @@
{
  "diagnostics.globals": [
    "vim"
  ]
}
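
This tells lua-language-server that `vim` is a known global, so the plugin's Lua sources don't trigger undefined-global diagnostics.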
18 changes: 0 additions & 18 deletions lua/nvim-llama/config.lua

This file was deleted.

38 changes: 30 additions & 8 deletions lua/nvim-llama/init.lua
@@ -1,8 +1,10 @@
 local window = require("nvim-llama.window")
+local settings = require("nvim-llama.settings")
+local llama_cpp = require('nvim-llama.install')
 
-local llama = {}
+local M = {}
 
-llama.interactive_llama = function()
+M.interactive_llama = function()
     local buf, win = window.create_floating_window()
 
     -- Start terminal in the buffer with your binary
@@ -11,10 +13,30 @@ llama.interactive_llama = function()
     end)
 end
 
-vim.cmd [[
-command! Llama lua require'nvim-llama'.interactive_llama()
-command! LlamaInstall lua require'nvim-llama.install'.install()
-command! LlamaRebuild lua require'nvim-llama.rebuild'.rebuild()
-]]
-
-return llama
+local function set_commands()
+    vim.api.nvim_create_user_command("Llama", function()
+        M.interactive_llama()
+    end, {})
+
+    vim.api.nvim_create_user_command("LlamaInstall", function()
+        llama_cpp.install()
+    end, {})
+
+    vim.api.nvim_create_user_command("LlamaRebuild", function()
+        llama_cpp.rebuild()
+    end, {})
+
+    vim.api.nvim_create_user_command("LlamaUpdate", function()
+        llama_cpp.update()
+    end, {})
+end
+
+function M.setup(config)
+    if config then
+        settings.set(config)
+    end
+
+    set_commands()
+end
+
+return M
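
With `setup()` in place, the commands registered above can be driven from Lua as well as the command line; a minimal sketch:

```lua
-- Equivalent to typing :LlamaInstall and then :Llama
vim.cmd('LlamaInstall') -- clone llama.cpp at the pinned sha and build it
vim.cmd('Llama')        -- open the interactive llama.cpp window
```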
73 changes: 51 additions & 22 deletions lua/nvim-llama/install.lua
@@ -1,15 +1,14 @@
-local config = require("nvim-llama.config")
+local settings = require("nvim-llama.settings")
 
 local repo_url = 'https://github.com/ggerganov/llama.cpp.git'
-local target_dir ='~/.local/share/nvim/llama.cpp'
+local target_dir = vim.fn.expand('$HOME/.local/share/nvim/llama.cpp')
 local repo_sha = 'b1095'
 
-local M = {}
-
---- Check if a file or directory exists at path
+-- Check if a file or directory exists at path
 -- Attribution: https://stackoverflow.com/questions/1340230/check-if-directory-exists-in-lua
-function dir_exists(target_dir)
-    local ok, err, code = os.rename(target_dir, target_dir)
+local function dir_exists(target)
+    local ok, err, code = os.rename(target, target)
 
     if not ok then
         if code == 13 then
@@ -22,45 +21,56 @@ function dir_exists(target_dir)
 end
 
 -- Checks if llama.cpp directory exists at expected location
-local function clone_repo_command(repo_url, target_dir, sha)
-    local ok, err = dir_exists(target_dir)
+local function clone_repo_command(url, target, sha)
+    local ok, err = dir_exists(target)
 
     if not ok then
-        local clone_cmd = string.format("git clone %s %s", repo_url, target_dir)
-        local checkout_cmd = string.format("git -C %s checkout %s", target_dir, sha)
+        local clone_cmd = string.format("git clone %s %s", url, target)
+        local checkout_cmd = string.format("git -C %s checkout %s", target, sha)
 
         return clone_cmd .. ' && ' .. checkout_cmd
     end
 
-    return
+    return ''
 end
 
-local function build_make_command(target_dir, opts)
-    local build_string = ''
-    local metal_build = 'make'
+local function build_make_command(target)
+    local build_string = 'make'
+    local metal_build = ''
 
-    if opts.metal_build then
-        metal_build = 'LLAMA_METAL=1'
+    print(settings.current.build_metal)
+    if settings.current.build_metal then
+        metal_build = metal_build .. 'LLAMA_METAL=1 '
     end
 
-    build_string = metal_build .. ' ' .. build_string
+    build_string = metal_build .. build_string
 
     -- Navigate to the repo directory and build using make
-    return string.format("cd %s && %s", target_dir, build_string)
+    return string.format("cd %s && %s", target, build_string)
 end
 
+local M = {}
+
 function M.rebuild()
-    print('Rebuilding llama.cpp')
-    build_make(target_dir, config.options)
+    print('Rebuilding llama.cpp at sha ' .. repo_sha)
+    local command = build_make_command(target_dir)
+    vim.fn.termopen(command)
 end
 
 function M.install()
     vim.cmd('vsplit')
     vim.cmd('enew')
 
+    local commands = ''
     local clone_repo = clone_repo_command(repo_url, target_dir, repo_sha)
-    local build_make = build_make_command(target_dir, config.options)
-    local commands = clone_repo .. ' && ' .. build_make
+    local build_make = build_make_command(target_dir)
+    if clone_repo == '' then
+        commands = build_make
+    else
+        commands = clone_repo .. ' && ' .. build_make
+    end
+
+    print(commands)
 
     vim.fn.termopen(commands)
 
@@ -72,4 +82,23 @@ function M.install()
     vim.cmd('vertical resize 60')
 end
 
+function M.update()
+    local ok, err = dir_exists(target_dir)
+
+    if ok then
+        local checkout_cmd = string.format("git -C %s checkout %s", target_dir, repo_sha)
+        local output = vim.fn.system(checkout_cmd)
+        if vim.v.shell_error ~= 0 then
+            print("Error checking out llama.cpp at " .. repo_sha)
+            print(output)
+        else
+            print("Checked out llama.cpp at " .. repo_sha)
+        end
+    else
+        print("could not find llama.cpp directory")
+    end
+
+    -- TODO: Rebuild as well?
+end
+
 return M
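
For reference, with the defaults above and `build_metal = true`, the command string `M.install()` hands to `termopen` works out to roughly the following (a sketch; `$HOME` stands in for the expanded `target_dir`):

```sh
git clone https://github.com/ggerganov/llama.cpp.git $HOME/.local/share/nvim/llama.cpp \
  && git -C $HOME/.local/share/nvim/llama.cpp checkout b1095 \
  && cd $HOME/.local/share/nvim/llama.cpp && LLAMA_METAL=1 make
```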
21 changes: 21 additions & 0 deletions lua/nvim-llama/settings.lua
@@ -0,0 +1,21 @@
local M = {}

M.namespace = vim.api.nvim_create_namespace("nvim-llama")

local defaults = {
    -- See plugin debugging logs
    debug = false,

    -- Build llama.cpp for GPU acceleration on Apple M chip devices.
    -- If you are using an Apple M1/M2 laptop, it is highly recommended to
    -- enable this since, depending on the model, it may drastically increase performance.
    build_metal = false,
}

M.current = defaults

function M.set(opts)
    M.current = vim.tbl_deep_extend("force", defaults, opts or {})
end

return M
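
A small sketch of the merge behavior: `vim.tbl_deep_extend("force", ...)` overlays the user's table onto the defaults, so any key left unspecified keeps its default value:

```lua
local settings = require('nvim-llama.settings')

settings.set({ build_metal = true })
print(settings.current.build_metal) -- true  (user override)
print(settings.current.debug)       -- false (default retained)
```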
