
Fixed `bool` marshalling

pull/712/head
Martin Evans · 2 years ago
commit f55222bc38
4 changed files with 11 additions and 2 deletions

  1. LLama/LLamaStatelessExecutor.cs (+1, -0)
  2. LLama/Native/NativeApi.LLava.cs (+3, -2)
  3. LLama/Native/NativeApi.cs (+6, -0)
  4. LLama/Native/SafeLlamaModelHandle.cs (+1, -0)
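
For context: .NET's default marshalling for `bool` in a P/Invoke signature is the 4-byte Win32 `BOOL` (`UnmanagedType.Bool`), whereas the C/C++ `bool` used by llama.cpp is a single byte. When the native function writes only one byte of the return value, the remaining bytes are stale data, so a native `false` can surface in managed code as `true`. Annotating the return with `[return: MarshalAs(UnmanagedType.U1)]` tells the marshaller to read exactly one unsigned byte. A minimal sketch, using a hypothetical library and export `native_returns_bool` (not part of this commit):

    using System.Runtime.InteropServices;

    internal static class BoolMarshalSketch
    {
        // Hypothetical native export, for illustration only:
        //   bool native_returns_bool(void);   // C99/C++ bool: one byte
        private const string Lib = "demo_native";

        // Default marshalling: the runtime assumes a 4-byte Win32 BOOL
        // and reads three bytes the callee never wrote.
        [DllImport(Lib, EntryPoint = "native_returns_bool", CallingConvention = CallingConvention.Cdecl)]
        public static extern bool NativeReturnsBoolUnreliable();

        // U1 marshalling: the return value is read as exactly one
        // unsigned byte, matching the native bool ABI.
        [DllImport(Lib, EntryPoint = "native_returns_bool", CallingConvention = CallingConvention.Cdecl)]
        [return: MarshalAs(UnmanagedType.U1)]
        public static extern bool NativeReturnsBool();
    }

The commit below applies exactly this one-byte annotation to every `bool`-returning P/Invoke in the bindings.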

LLama/LLamaStatelessExecutor.cs (+1, -0)

@@ -4,6 +4,7 @@ using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Runtime.CompilerServices;
+using System.Text;
 using System.Threading;
 using LLama.Exceptions;
 using LLama.Native;


LLama/Native/NativeApi.LLava.cs (+3, -2)

@@ -13,6 +13,7 @@ public static unsafe partial class NativeApi
     /// <param name="ctxClip">Llava Model</param>
     /// <returns>True if validate successfully</returns>
     [DllImport(llavaLibraryName, EntryPoint = "llava_validate_embed_size", CallingConvention = CallingConvention.Cdecl)]
+    [return: MarshalAs(UnmanagedType.U1)]
     public static extern bool llava_validate_embed_size( SafeLLamaContextHandle ctxLlama, SafeLlavaModelHandle ctxClip);
 
     /// <summary>
@@ -56,7 +57,7 @@ public static unsafe partial class NativeApi
     /// <param name="embed">Embedding handle</param>
     /// <returns>True on success</returns>
     [DllImport(llavaLibraryName, EntryPoint = "llava_eval_image_embed", CallingConvention = CallingConvention.Cdecl)]
-    public static extern bool llava_eval_image_embed(SafeLLamaContextHandle ctx_llama, SafeLlavaImageEmbedHandle embed,
-                                                     int n_batch, ref int n_past);
+    [return: MarshalAs(UnmanagedType.U1)]
+    public static extern bool llava_eval_image_embed(SafeLLamaContextHandle ctx_llama, SafeLlavaImageEmbedHandle embed, int n_batch, ref int n_past);
 }

LLama/Native/NativeApi.cs (+6, -0)

@@ -34,6 +34,7 @@ namespace LLama.Native
         /// </summary>
         /// <returns></returns>
         [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
+        [return: MarshalAs(UnmanagedType.U1)]
         public static extern bool llama_supports_mmap();
 
         /// <summary>
@@ -41,6 +42,7 @@ namespace LLama.Native
         /// </summary>
         /// <returns></returns>
         [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
+        [return: MarshalAs(UnmanagedType.U1)]
         public static extern bool llama_supports_mlock();
 
         /// <summary>
@@ -48,6 +50,7 @@ namespace LLama.Native
         /// </summary>
         /// <returns></returns>
         [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
+        [return: MarshalAs(UnmanagedType.U1)]
         public static extern bool llama_supports_gpu_offload();
 
         /// <summary>
@@ -77,6 +80,7 @@ namespace LLama.Native
         /// <param name="n_token_count_out"></param>
         /// <returns></returns>
         [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
+        [return: MarshalAs(UnmanagedType.U1)]
         public static extern bool llama_state_load_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens_out, ulong n_token_capacity, out ulong n_token_count_out);
 
         /// <summary>
@@ -88,6 +92,7 @@ namespace LLama.Native
         /// <param name="n_token_count"></param>
         /// <returns></returns>
         [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
+        [return: MarshalAs(UnmanagedType.U1)]
         public static extern bool llama_state_save_file(SafeLLamaContextHandle ctx, string path_session, LLamaToken[] tokens, ulong n_token_count);
 
         [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
@@ -284,6 +289,7 @@ namespace LLama.Native
         /// <param name="p1"></param>
         /// <returns>Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails</returns>
         [DllImport(libraryName, CallingConvention = CallingConvention.Cdecl)]
+        [return: MarshalAs(UnmanagedType.U1)]
         public static extern bool llama_kv_cache_seq_rm(SafeLLamaContextHandle ctx, LLamaSeqId seq, LLamaPos p0, LLamaPos p1);
 
         /// <summary>
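
A short sketch of how the now correctly marshalled results from this file might be consumed (illustration only, not code from the commit):

    using System;
    using LLama.Native;

    // Each of these P/Invokes now returns a correctly-read one-byte bool.
    Console.WriteLine($"mmap supported:        {NativeApi.llama_supports_mmap()}");
    Console.WriteLine($"mlock supported:       {NativeApi.llama_supports_mlock()}");
    Console.WriteLine($"GPU offload supported: {NativeApi.llama_supports_gpu_offload()}");

This matters most for calls like `llama_kv_cache_seq_rm`, whose `false` return ("a partial sequence cannot be removed") was previously at risk of being misread as success.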


LLama/Native/SafeLlamaModelHandle.cs (+1, -0)

@@ -369,6 +369,7 @@ namespace LLama.Native
         /// <param name="token"></param>
         /// <returns></returns>
         [DllImport(NativeApi.libraryName, CallingConvention = CallingConvention.Cdecl)]
+        [return: MarshalAs(UnmanagedType.U1)]
         private static extern bool llama_token_is_eog(SafeLlamaModelHandle model, LLamaToken token);
         #endregion
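
Since `llama_token_is_eog` is private to the handle class, it would typically be surfaced through an instance member; a hypothetical wrapper for illustration (the real member name in SafeLlamaModelHandle may differ):

    // Hypothetical convenience wrapper inside SafeLlamaModelHandle.
    public bool IsEndOfGeneration(LLamaToken token)
    {
        // The U1 marshalling above makes this boolean trustworthy.
        return llama_token_is_eog(this, token);
    }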


