Cite this publication
%0 Generic
%1 ma2024era
%A Ma, Shuming
%A Wang, Hongyu
%A Ma, Lingxiao
%A Wang, Lei
%A Wang, Wenhui
%A Huang, Shaohan
%A Dong, Li
%A Wang, Ruiping
%A Xue, Jilong
%A Wei, Furu
%D 2024
%K llm nlp reading smol
%T The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits
@misc{ma2024era,
  added-at = {2024-04-19T09:31:46.000+0200},
  archiveprefix = {arXiv},
  author = {Ma, Shuming and Wang, Hongyu and Ma, Lingxiao and Wang, Lei and Wang, Wenhui and Huang, Shaohan and Dong, Li and Wang, Ruiping and Xue, Jilong and Wei, Furu},
  biburl = {https://www.bibsonomy.org/bibtex/2c37de034d5b854f1a6ae851f49728763/tobias.koopmann},
  description = {The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits},
  eprint = {2402.17764},
  interhash = {afcad28d08063d1c50a06d7fc8cc1aca},
  intrahash = {c37de034d5b854f1a6ae851f49728763},
  keywords = {llm nlp reading smol},
  primaryclass = {cs.CL},
  timestamp = {2024-04-19T09:31:46.000+0200},
  title = {The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits},
  year = {2024}
}