feat: update the llama backends.
Tag: v0.4.2-preview
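Summary of the diff: the macOS jobs are disabled in the CI build matrix, FixedSizeQueue gains a netstandard2.0 fallback for its initial-size check, the macOS backend now packages the Metal build of the native library (libllama-metal.dylib) alongside libllama.dylib, and the backend-loading error message gains a fourth hint about missing native dependencies.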
@@ -12,7 +12,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        build: [linux-debug, linux-release, macos-debug, macos-release, windows-debug, windows-release]
+        build: [linux-debug, linux-release, windows-debug, windows-release]
         include:
           - build: linux-debug
             os: ubuntu-latest
@@ -20,12 +20,12 @@ jobs:
           - build: linux-release
             os: ubuntu-latest
             config: release
-          - build: macos-debug
-            os: macos-latest
-            config: debug
-          - build: macos-release
-            os: macos-latest
-            config: release
+          # - build: macos-debug
+          #   os: macos-latest
+          #   config: debug
+          # - build: macos-release
+          #   os: macos-latest
+          #   config: release
           - build: windows-debug
             os: windows-2019
             config: debug
@@ -30,8 +30,11 @@ namespace LLama.Common
         /// <param name="data"></param>
         public FixedSizeQueue(int size, IEnumerable<T> data)
         {
-#if NETCOREAPP3_0_OR_GREATER
+            // Try an early check on the amount of data supplied (if possible)
+#if NETSTANDARD2_0
+            var dataCount = data.Count();
+            if (data.Count() > size)
+                throw new ArgumentException($"The max size set for the quene is {size}, but got {dataCount} initial values.");
+#else
             if (data.TryGetNonEnumeratedCount(out var count) && count > size)
                 throw new ArgumentException($"The max size set for the quene is {size}, but got {count} initial values.");
 #endif
@@ -42,9 +45,12 @@ namespace LLama.Common
             // Now check if that list is a valid size
             if (_storage.Count > _maxSize)
-                throw new ArgumentException($"The max size set for the quene is {size}, but got {_storage.Count} initial values.");
+#if NETSTANDARD2_0
+                throw new ArgumentException($"The max size set for the quene is {size}, but got {dataCount} initial values.");
+#else
+                throw new ArgumentException($"The max size set for the quene is {size}, but got {count} initial values.");
+#endif
         }
 
         /// <summary>
         /// Replace every item in the queue with the given value
         /// </summary>
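The #if split in the constructor exists because Enumerable.TryGetNonEnumeratedCount was only introduced in .NET 6, so the netstandard2.0 target has to fall back to Count(), which may enumerate the sequence. A minimal sketch contrasting the two branches (the Generate helper and the values are illustrative, not part of the patch):

using System;
using System.Collections.Generic;
using System.Linq;

class CountCheckDemo
{
    // A lazy sequence whose length is not known without enumerating it.
    static IEnumerable<int> Generate()
    {
        yield return 1;
        yield return 2;
        yield return 3;
    }

    static void Main()
    {
        var list = new List<int> { 1, 2, 3 };

        // Cheap path (the non-netstandard2.0 branch): reports a count only
        // when it can do so without enumerating, so it succeeds for List<T>
        // but not for the iterator.
        Console.WriteLine(list.TryGetNonEnumeratedCount(out var n));   // True, n == 3
        Console.WriteLine(Generate().TryGetNonEnumeratedCount(out _)); // False

        // Fallback path (the netstandard2.0 branch): always succeeds, but
        // walks the whole sequence to count it.
        Console.WriteLine(Generate().Count());                         // 3
    }
}

Since the fallback branch already materializes dataCount, the comparison could read dataCount > size and avoid enumerating data a second time.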
@@ -31,6 +31,10 @@
       <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
       <Link>libllama.dylib</Link>
     </None>
+    <None Include="$(MSBuildThisFileDirectory)runtimes/libllama-metal.dylib">
+      <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
+      <Link>libllama-metal.dylib</Link>
+    </None>
     <None Include="$(MSBuildThisFileDirectory)runtimes/ggml-metal.metal">
       <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
       <Link>ggml-metal.metal</Link>
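With this item the Metal-enabled dylib is copied to the output directory next to the default libllama.dylib; the diff does not show how a host picks between the two at runtime. A hypothetical sketch (not LLamaSharp's actual loading code) using the standard .NET resolver hook, where the "libllama" import name is an assumption:

using System;
using System.Reflection;
using System.Runtime.InteropServices;

static class NativeResolver
{
    public static void Install(Assembly assembly)
    {
        NativeLibrary.SetDllImportResolver(assembly, (name, asm, searchPath) =>
        {
            // On macOS, prefer the Metal-enabled binary that the packaging
            // change above copies next to the application.
            if (name == "libllama" && RuntimeInformation.IsOSPlatform(OSPlatform.OSX) &&
                NativeLibrary.TryLoad("libllama-metal.dylib", asm, searchPath, out var metal))
            {
                return metal;
            }
            return IntPtr.Zero; // fall back to the default resolution logic
        });
    }
}

Installing the resolver once at startup, e.g. NativeResolver.Install(Assembly.GetExecutingAssembly()), is enough; it only runs on the first P/Invoke into each library.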
@@ -21,7 +21,8 @@ namespace LLama.Native
                 "1. No LLamaSharp backend was installed. Please search LLamaSharp.Backend and install one of them. \n" +
                 "2. You are using a device with only CPU but installed cuda backend. Please install cpu backend instead. \n" +
                 "3. The backend is not compatible with your system cuda environment. Please check and fix it. If the environment is " +
-                "expected not to be changed, then consider build llama.cpp from source or submit an issue to LLamaSharp.");
+                "expected not to be changed, then consider build llama.cpp from source or submit an issue to LLamaSharp.\n" +
+                "4. One of the dependency of the native library is missed.\n");
             }
             NativeApi.llama_backend_init(false);
         }
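The new fourth hint covers the case where the library file exists but the OS loader cannot resolve one of its own native dependencies (a CUDA runtime, for instance). A standalone diagnostic sketch, independent of LLamaSharp, for telling that case apart from a missing file; the path is a placeholder:

using System;
using System.IO;
using System.Runtime.InteropServices;

class BackendProbe
{
    static void Main()
    {
        const string path = "./libllama.so"; // adjust per platform (.dylib/.dll)

        if (!File.Exists(path))
        {
            Console.WriteLine("Library file is missing (hint 1).");
        }
        else if (!NativeLibrary.TryLoad(path, out var handle))
        {
            // File present but the loader refused it: typically an unresolved
            // dependency or an ABI/CUDA mismatch (hints 3 and 4). Inspect with
            // ldd (Linux), otool -L (macOS) or dumpbin /dependents (Windows).
            Console.WriteLine("Load failed; check native dependencies.");
        }
        else
        {
            Console.WriteLine("Native library loaded successfully.");
            NativeLibrary.Free(handle);
        }
    }
}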