@@ -125,13 +125,6 @@ void replace_all(std::string & s, const std::string & search, const std::string
 #include <hbwmalloc.h>
 #endif
 
-static void zeros(std::ofstream & file, size_t n) {
-    char zero = 0;
-    for (size_t i = 0; i < n; ++i) {
-        file.write(&zero, 1);
-    }
-}
-
 LLAMA_ATTRIBUTE_FORMAT(1, 2)
 static std::string format(const char * fmt, ...) {
     va_list ap;
@@ -4922,7 +4915,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size);
 
     // placeholder for the meta data
-    ::zeros(fout, meta_size);
+    fout.seekp(meta_size, std::ios_base::beg);
 
     for (int i = 0; i < ml->n_tensors; ++i) {
         struct ggml_tensor * tensor = ml->get_tensor_meta(i);
@@ -5053,7 +5046,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
         // write tensor data + padding
         fout.write((const char *) new_data, new_size);
-        zeros(fout, GGML_PAD(new_size, align) - new_size);
+        fout.seekp(GGML_PAD(new_size, align) - new_size, std::ios_base::cur);
     }
 
     // go back to beginning of file and write the updated meta data
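
The change above swaps the byte-by-byte zero writer for std::ostream::seekp: the placeholder and padding regions are skipped rather than written out, and the gap reads back as zeros once later data (the next tensor, or the metadata written back over the placeholder) lands beyond it. Below is a minimal standalone sketch of the same pattern, using a made-up file name, header size, and payload; it is illustrative only, not the actual quantization code.

    // Sketch of the seekp-based placeholder pattern (illustrative only).
    #include <cstddef>
    #include <fstream>
    #include <vector>

    int main() {
        const size_t meta_size = 64;            // hypothetical placeholder size
        const size_t align     = 32;            // hypothetical alignment
        std::vector<char> data(1000, 0x2a);     // hypothetical tensor payload

        std::ofstream fout("example.bin", std::ios::binary);

        // reserve space for the metadata by seeking past it instead of writing zeros
        fout.seekp(meta_size, std::ios_base::beg);

        // write the payload, then skip forward to the next alignment boundary;
        // on POSIX filesystems the skipped bytes read back as zeros once
        // something is written at a later offset
        fout.write(data.data(), data.size());
        const size_t padded = (data.size() + align - 1) / align * align;
        fout.seekp(padded - data.size(), std::ios_base::cur);

        // go back and fill in the real metadata over the reserved region
        std::vector<char> meta(meta_size, 0x01);
        fout.seekp(0, std::ios_base::beg);
        fout.write(meta.data(), meta.size());

        // note: a gap left at the very end of the file is not materialized
        // unless bytes are written at or beyond it, so trailing padding
        // still needs a real write
        return 0;
    }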