-rw-r--r--  convert-pth-to-ggml.py |  2
-rw-r--r--  main.cpp               |  8
-rw-r--r--  utils.cpp              | 79
-rw-r--r--  utils.h                |  9
4 files changed, 92 insertions, 6 deletions
diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py
index d0a187c..bd0a9d0 100644
--- a/convert-pth-to-ggml.py
+++ b/convert-pth-to-ggml.py
@@ -73,7 +73,7 @@ fout.write(struct.pack("i", hparams["dim"]))
 fout.write(struct.pack("i", hparams["multiple_of"]))
 fout.write(struct.pack("i", hparams["n_heads"]))
 fout.write(struct.pack("i", hparams["n_layers"]))
-fout.write(struct.pack("i", 64)) # rot
+fout.write(struct.pack("i", hparams["dim"] // hparams["n_heads"])) # rot (obsolete)
 fout.write(struct.pack("i", ftype))

 # Is this correct??
diff --git a/main.cpp b/main.cpp
--- a/main.cpp
+++ b/main.cpp
@@ -400,7 +400,7 @@ bool llama_eval(
     const int n_ctx   = hparams.n_ctx;
     const int n_head  = hparams.n_head;
     const int n_vocab = hparams.n_vocab;
-    const int n_rot   = hparams.n_rot;
+    const int n_rot   = hparams.n_embd/hparams.n_head;

     const int d_key = n_embd/n_head;
@@ -628,6 +628,9 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }

+//    params.prompt = R"(// this function checks if the number n is prime
+//bool is_prime(int n) {)";
+
     int64_t t_load_us = 0;

     gpt_vocab vocab;
@@ -691,7 +694,6 @@
         if (i >= embd_inp.size()) {
             // sample next token
-            const int   top_k = params.top_k;
             const float top_p = params.top_p;
             const float temp  = params.temp;
@@ -702,7 +704,7 @@
             {
                 const int64_t t_start_sample_us = ggml_time_us();

-                id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);
+                id = llama_sample_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_p, temp, rng);

                 t_sample_us += ggml_time_us() - t_start_sample_us;
             }
diff --git a/utils.cpp b/utils.cpp
--- a/utils.cpp
+++ b/utils.cpp
@@ -257,7 +257,7 @@ std::vector<gpt_vocab::id> llama_tokenize(const gpt_vocab & vocab, const std::st
             }
         }

-        if (l == 0 && t != 13) {
+        if (l == 0) {
             break;
         }
@@ -367,6 +367,83 @@ gpt_vocab::id gpt_sample_top_k_top_p(
     return logits_id[idx].second;
 }

+gpt_vocab::id llama_sample_top_p(
+        const gpt_vocab & vocab,
+        const float * logits,
+        double top_p,
+        double temp,
+        std::mt19937 & rng) {
+    int n_logits = vocab.id_to_token.size();
+
+    std::vector<std::pair<double, gpt_vocab::id>> logits_id;
+    logits_id.reserve(n_logits);
+
+    {
+        const double scale = 1.0/temp;
+        for (int i = 0; i < n_logits; ++i) {
+            logits_id.push_back(std::make_pair(logits[i]*scale, i));
+        }
+    }
+
+    std::sort(
+            logits_id.begin(),
+            logits_id.end(),
+            [](const std::pair<double, gpt_vocab::id> & a, const std::pair<double, gpt_vocab::id> & b) {
+        return a.first > b.first;
+    });
+
+    double maxl = -INFINITY;
+    for (const auto & kv : logits_id) {
+        maxl = std::max(maxl, kv.first);
+    }
+
+    // compute probs for the top K tokens
+    std::vector<double> probs;
+    probs.reserve(logits_id.size());
+
+    double sum = 0.0;
+    for (const auto & kv : logits_id) {
+        double p = exp(kv.first - maxl);
+        probs.push_back(p);
+        sum += p;
+    }
+
+    // normalize the probs
+    for (auto & p : probs) {
+        p /= sum;
+    }
+
+    if (top_p < 1.0f) {
+        double cumsum = 0.0f;
+        for (int i = 0; i < (int) probs.size(); i++) {
+            cumsum += probs[i];
+            if (cumsum >= top_p) {
+                probs.resize(i + 1);
+                logits_id.resize(i + 1);
+                break;
+            }
+        }
+
+        cumsum = 1.0/cumsum;
+        for (int i = 0; i < (int) probs.size(); i++) {
+            probs[i] *= cumsum;
+        }
+    }
+
+    //printf("\n");
+    //for (int i = 0; i < (int) 10; i++) {
+    //    printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
+    //}
+    //printf("\n\n");
+    //exit(0);
+
+    std::discrete_distribution<> dist(probs.begin(), probs.end());
+    int idx = dist(rng);
+
+    return logits_id[idx].second;
+}
+
+
 size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t * hist) {
     const int nb = k / qk;
     const size_t row_size = nb*(sizeof(float) + sizeof(uint8_t)*qk/2);
diff --git a/utils.h b/utils.h
--- a/utils.h
+++ b/utils.h
@@ -18,7 +18,7 @@ struct gpt_params {
     int32_t n_predict = 128; // new tokens to predict

     // sampling parameters
-    int32_t top_k = 40;
+    int32_t top_k = 40; // unused
     float   top_p = 0.95f;
     float   temp  = 0.80f;
@@ -86,6 +86,13 @@ gpt_vocab::id gpt_sample_top_k_top_p(
         double temp,
         std::mt19937 & rng);

+gpt_vocab::id llama_sample_top_p(
+        const gpt_vocab & vocab,
+        const float * logits,
+        double top_p,
+        double temp,
+        std::mt19937 & rng);
+
 //
 // Quantization
 //
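The converter change stops hard-coding the rotation dimension as 64 and writes dim // n_heads instead, while llama_eval now derives the same value from the loaded hyperparameters, which is why the header field is annotated as obsolete. A minimal sketch of that arithmetic, assuming the commonly cited 7B hyperparameters (n_embd = 4096, n_head = 32) purely for illustration:

#include <cassert>
#include <cstdio>

int main() {
    // Illustrative hyperparameters (assumed 7B values, not read from any model file).
    const int n_embd = 4096;
    const int n_head = 32;

    // Same expression the patch uses in llama_eval: the per-head dimension doubles as the RoPE dimension.
    const int n_rot = n_embd / n_head;

    assert(n_rot == 128); // the previously hard-coded 64 would not match this model
    printf("n_rot = %d\n", n_rot);
    return 0;
}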

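The new llama_sample_top_p drops the top-k cutoff entirely and samples from the smallest set of tokens whose cumulative probability reaches top_p (nucleus sampling). The sketch below restates that logic as a self-contained function over a bare logits vector; the helper name, the toy logit values, and the fixed seed are assumptions for illustration only, while the real function in utils.cpp also carries the gpt_vocab and its token ids.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <random>
#include <utility>
#include <vector>

// Standalone sketch of nucleus (top-p) sampling, mirroring the logic of the added llama_sample_top_p.
static int sample_top_p(const std::vector<float> & logits, double top_p, double temp, std::mt19937 & rng) {
    const int n_logits = (int) logits.size();

    // scale by 1/temp and remember the original token id of each logit
    std::vector<std::pair<double, int>> logits_id;
    logits_id.reserve(n_logits);
    for (int i = 0; i < n_logits; ++i) {
        logits_id.push_back(std::make_pair(logits[i] / temp, i));
    }

    // sort descending so the cumulative probability can be cut off at top_p
    std::sort(logits_id.begin(), logits_id.end(),
              [](const std::pair<double, int> & a, const std::pair<double, int> & b) {
                  return a.first > b.first;
              });

    // softmax with the usual max subtraction for numerical stability
    const double maxl = logits_id.front().first;
    std::vector<double> probs;
    probs.reserve(n_logits);
    double sum = 0.0;
    for (const auto & kv : logits_id) {
        const double p = std::exp(kv.first - maxl);
        probs.push_back(p);
        sum += p;
    }
    for (auto & p : probs) {
        p /= sum;
    }

    // keep the smallest prefix whose cumulative probability reaches top_p, then renormalize
    if (top_p < 1.0) {
        double cumsum = 0.0;
        for (size_t i = 0; i < probs.size(); ++i) {
            cumsum += probs[i];
            if (cumsum >= top_p) {
                probs.resize(i + 1);
                break;
            }
        }
        for (auto & p : probs) {
            p /= cumsum;
        }
    }

    // draw one token from the truncated, renormalized distribution
    std::discrete_distribution<> dist(probs.begin(), probs.end());
    return logits_id[dist(rng)].second;
}

int main() {
    std::mt19937 rng(1234); // fixed seed, for a reproducible toy run
    const std::vector<float> logits = {2.0f, 1.0f, 0.5f, -1.0f}; // made-up values
    printf("sampled token id: %d\n", sample_top_p(logits, 0.95, 0.80, rng));
    return 0;
}

Compared with gpt_sample_top_k_top_p, the sampling difference is that nothing is discarded by rank before the cumulative cutoff, which is why main.cpp no longer reads params.top_k and the field is marked unused in utils.h.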