
Commit 4947b81 (merge commit)

Merge branch 'master' into xsn/mtmd_graph_builder_refactor

2 parents: 9eb496b + 1e333d5

20 files changed: +152, -99 lines

common/common.h

Lines changed: 2 additions & 0 deletions
@@ -96,6 +96,7 @@ enum common_sampler_type {
     COMMON_SAMPLER_TYPE_XTC = 8,
     COMMON_SAMPLER_TYPE_INFILL = 9,
     COMMON_SAMPLER_TYPE_PENALTIES = 10,
+    COMMON_SAMPLER_TYPE_TOP_N_SIGMA = 11,
 };
 
 // dimensionality reduction methods, used by cvector-generator
@@ -161,6 +162,7 @@ struct common_params_sampling {
     std::vector<enum common_sampler_type> samplers = {
         COMMON_SAMPLER_TYPE_PENALTIES,
         COMMON_SAMPLER_TYPE_DRY,
+        COMMON_SAMPLER_TYPE_TOP_N_SIGMA,
         COMMON_SAMPLER_TYPE_TOP_K,
         COMMON_SAMPLER_TYPE_TYPICAL_P,
         COMMON_SAMPLER_TYPE_TOP_P,

common/sampling.cpp

Lines changed: 46 additions & 44 deletions
@@ -229,51 +229,48 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
             params.logit_bias.data()));
 
     if (params.mirostat == 0) {
-        if (params.top_n_sigma >= 0) {
-            llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k));
-            llama_sampler_chain_add(result->chain, llama_sampler_init_temp (params.temp));
-            llama_sampler_chain_add(result->chain, llama_sampler_init_top_n_sigma (params.top_n_sigma));
-        } else {
-            for (const auto & cnstr : params.samplers) {
-                switch (cnstr) {
-                    case COMMON_SAMPLER_TYPE_DRY:
-                        {
-                            std::vector<const char *> c_breakers;
-                            c_breakers.reserve(params.dry_sequence_breakers.size());
-                            for (const auto & str : params.dry_sequence_breakers) {
-                                c_breakers.push_back(str.c_str());
-                            }
-
-                            llama_sampler_chain_add(result->chain, llama_sampler_init_dry (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size()));
+        for (const auto & cnstr : params.samplers) {
+            switch (cnstr) {
+                case COMMON_SAMPLER_TYPE_DRY:
+                    {
+                        std::vector<const char *> c_breakers;
+                        c_breakers.reserve(params.dry_sequence_breakers.size());
+                        for (const auto & str : params.dry_sequence_breakers) {
+                            c_breakers.push_back(str.c_str());
                         }
-                        break;
-                    case COMMON_SAMPLER_TYPE_TOP_K:
-                        llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k));
-                        break;
-                    case COMMON_SAMPLER_TYPE_TOP_P:
-                        llama_sampler_chain_add(result->chain, llama_sampler_init_top_p (params.top_p, params.min_keep));
-                        break;
-                    case COMMON_SAMPLER_TYPE_MIN_P:
-                        llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep));
-                        break;
-                    case COMMON_SAMPLER_TYPE_XTC:
-                        llama_sampler_chain_add(result->chain, llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
-                        break;
-                    case COMMON_SAMPLER_TYPE_TYPICAL_P:
-                        llama_sampler_chain_add(result->chain, llama_sampler_init_typical (params.typ_p, params.min_keep));
-                        break;
-                    case COMMON_SAMPLER_TYPE_TEMPERATURE:
-                        llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
-                        break;
-                    case COMMON_SAMPLER_TYPE_INFILL:
-                        llama_sampler_chain_add(result->chain, llama_sampler_init_infill (vocab));
-                        break;
-                    case COMMON_SAMPLER_TYPE_PENALTIES:
-                        llama_sampler_chain_add(result->chain, llama_sampler_init_penalties(params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present));
-                        break;
-                    default:
-                        GGML_ASSERT(false && "unknown sampler type");
-                }
+
+                        llama_sampler_chain_add(result->chain, llama_sampler_init_dry (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size()));
+                    }
+                    break;
+                case COMMON_SAMPLER_TYPE_TOP_K:
+                    llama_sampler_chain_add(result->chain, llama_sampler_init_top_k (params.top_k));
+                    break;
+                case COMMON_SAMPLER_TYPE_TOP_P:
+                    llama_sampler_chain_add(result->chain, llama_sampler_init_top_p (params.top_p, params.min_keep));
+                    break;
+                case COMMON_SAMPLER_TYPE_TOP_N_SIGMA:
+                    llama_sampler_chain_add(result->chain, llama_sampler_init_top_n_sigma (params.top_n_sigma));
+                    break;
+                case COMMON_SAMPLER_TYPE_MIN_P:
+                    llama_sampler_chain_add(result->chain, llama_sampler_init_min_p (params.min_p, params.min_keep));
+                    break;
+                case COMMON_SAMPLER_TYPE_XTC:
+                    llama_sampler_chain_add(result->chain, llama_sampler_init_xtc (params.xtc_probability, params.xtc_threshold, params.min_keep, params.seed));
+                    break;
+                case COMMON_SAMPLER_TYPE_TYPICAL_P:
+                    llama_sampler_chain_add(result->chain, llama_sampler_init_typical (params.typ_p, params.min_keep));
+                    break;
+                case COMMON_SAMPLER_TYPE_TEMPERATURE:
+                    llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent));
+                    break;
+                case COMMON_SAMPLER_TYPE_INFILL:
+                    llama_sampler_chain_add(result->chain, llama_sampler_init_infill (vocab));
+                    break;
+                case COMMON_SAMPLER_TYPE_PENALTIES:
+                    llama_sampler_chain_add(result->chain, llama_sampler_init_penalties (params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present));
+                    break;
+                default:
+                    GGML_ASSERT(false && "unknown sampler type");
             }
         }
         llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed));
@@ -475,6 +472,7 @@ char common_sampler_type_to_chr(enum common_sampler_type cnstr) {
         case COMMON_SAMPLER_TYPE_TOP_K: return 'k';
         case COMMON_SAMPLER_TYPE_TYPICAL_P: return 'y';
         case COMMON_SAMPLER_TYPE_TOP_P: return 'p';
+        case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return 's';
         case COMMON_SAMPLER_TYPE_MIN_P: return 'm';
         case COMMON_SAMPLER_TYPE_TEMPERATURE: return 't';
         case COMMON_SAMPLER_TYPE_XTC: return 'x';
@@ -490,6 +488,7 @@ std::string common_sampler_type_to_str(enum common_sampler_type cnstr) {
         case COMMON_SAMPLER_TYPE_TOP_K: return "top_k";
         case COMMON_SAMPLER_TYPE_TYPICAL_P: return "typ_p";
         case COMMON_SAMPLER_TYPE_TOP_P: return "top_p";
+        case COMMON_SAMPLER_TYPE_TOP_N_SIGMA: return "top_n_sigma";
         case COMMON_SAMPLER_TYPE_MIN_P: return "min_p";
         case COMMON_SAMPLER_TYPE_TEMPERATURE: return "temperature";
         case COMMON_SAMPLER_TYPE_XTC: return "xtc";
@@ -504,6 +503,7 @@ std::vector<common_sampler_type> common_sampler_types_from_names(const std::vect
         { "dry", COMMON_SAMPLER_TYPE_DRY },
         { "top_k", COMMON_SAMPLER_TYPE_TOP_K },
         { "top_p", COMMON_SAMPLER_TYPE_TOP_P },
+        { "top_n_sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
         { "typ_p", COMMON_SAMPLER_TYPE_TYPICAL_P },
         { "min_p", COMMON_SAMPLER_TYPE_MIN_P },
         { "temperature", COMMON_SAMPLER_TYPE_TEMPERATURE },
@@ -517,6 +517,7 @@ std::vector<common_sampler_type> common_sampler_types_from_names(const std::vect
     std::unordered_map<std::string, common_sampler_type> sampler_alt_name_map {
         { "top-k", COMMON_SAMPLER_TYPE_TOP_K },
         { "top-p", COMMON_SAMPLER_TYPE_TOP_P },
+        { "top-n-sigma", COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
         { "nucleus", COMMON_SAMPLER_TYPE_TOP_P },
         { "typical-p", COMMON_SAMPLER_TYPE_TYPICAL_P },
         { "typical", COMMON_SAMPLER_TYPE_TYPICAL_P },
@@ -552,6 +553,7 @@ std::vector<common_sampler_type> common_sampler_types_from_chars(const std::stri
         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_K), COMMON_SAMPLER_TYPE_TOP_K },
         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TYPICAL_P), COMMON_SAMPLER_TYPE_TYPICAL_P },
         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_P), COMMON_SAMPLER_TYPE_TOP_P },
+        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TOP_N_SIGMA), COMMON_SAMPLER_TYPE_TOP_N_SIGMA },
         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_MIN_P), COMMON_SAMPLER_TYPE_MIN_P },
         { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_TEMPERATURE), COMMON_SAMPLER_TYPE_TEMPERATURE },
        { common_sampler_type_to_chr(COMMON_SAMPLER_TYPE_XTC), COMMON_SAMPLER_TYPE_XTC },
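
With this change, top-n-sigma is no longer a hard-coded special case that bypasses the configured sampler list; it is added to the chain wherever it appears in params.samplers, like any other sampler. The following is only a sketch of the resulting behavior, built directly against the public llama.h sampler API; the helper name make_top_n_sigma_chain and the example parameter values are illustrative, not part of this commit.

// Sketch: a chain in which top-n-sigma is just another link, following the
// new default ordering from common.h (top_n_sigma before top_k, dist last).
#include "llama.h"

static struct llama_sampler * make_top_n_sigma_chain(float top_n_sigma, int32_t top_k, float temp, uint32_t seed) {
    struct llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());

    llama_sampler_chain_add(chain, llama_sampler_init_top_n_sigma(top_n_sigma)); // e.g. 1.0f
    llama_sampler_chain_add(chain, llama_sampler_init_top_k(top_k));             // e.g. 40
    llama_sampler_chain_add(chain, llama_sampler_init_temp(temp));               // e.g. 0.8f
    llama_sampler_chain_add(chain, llama_sampler_init_dist(seed));               // final token pick

    return chain;
}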

convert_hf_to_gguf.py

Lines changed: 7 additions & 0 deletions
@@ -2761,6 +2761,13 @@ def set_gguf_parameters(self):
         if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
             self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
             logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
+        # YaRN is not enabled by default
+        # To enable it, please refer to this guide: https://huggingface.co/Qwen/Qwen3-30B-A3B#processing-long-texts
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "yarn":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
+                self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"])
 
     _experts: list[dict[str, Tensor]] | None = None
 
ggml/include/ggml-backend.h

Lines changed: 2 additions & 2 deletions
@@ -38,7 +38,7 @@ extern "C" {
     GGML_API ggml_backend_buffer_t ggml_backend_buft_alloc_buffer (ggml_backend_buffer_type_t buft, size_t size);
     GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft);
     GGML_API size_t ggml_backend_buft_get_max_size (ggml_backend_buffer_type_t buft);
-    GGML_API size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
+    GGML_API size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor);
     GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft);
     GGML_API ggml_backend_dev_t ggml_backend_buft_get_device (ggml_backend_buffer_type_t buft);
 
@@ -59,7 +59,7 @@ extern "C" {
     GGML_API enum ggml_status ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
     GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
     GGML_API size_t ggml_backend_buffer_get_max_size (ggml_backend_buffer_t buffer);
-    GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+    GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor);
     GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value);
     GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer);
     GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage);

ggml/include/ggml.h

Lines changed: 4 additions & 0 deletions
@@ -673,11 +673,15 @@ extern "C" {
     GGML_API bool ggml_is_3d (const struct ggml_tensor * tensor);
     GGML_API int ggml_n_dims (const struct ggml_tensor * tensor); // returns 1 for scalars
 
+    // returns whether the tensor elements can be iterated over with a flattened index (no gaps, no permutation)
     GGML_API bool ggml_is_contiguous (const struct ggml_tensor * tensor);
     GGML_API bool ggml_is_contiguous_0(const struct ggml_tensor * tensor); // same as ggml_is_contiguous()
     GGML_API bool ggml_is_contiguous_1(const struct ggml_tensor * tensor); // contiguous for dims >= 1
     GGML_API bool ggml_is_contiguous_2(const struct ggml_tensor * tensor); // contiguous for dims >= 2
 
+    // returns whether the tensor elements are allocated as one contiguous block of memory (no gaps, but permutation ok)
+    GGML_API bool ggml_is_contiguously_allocated(const struct ggml_tensor * tensor);
+
     // true for tensor that is stored in memory as CxWxHxN and has been permuted to WxHxCxN
     GGML_API bool ggml_is_contiguous_channels(const struct ggml_tensor * tensor);
 
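
The new predicate is weaker than ggml_is_contiguous: it only requires that the tensor's bytes form one gap-free block, so a permuted view of a densely packed tensor still qualifies. A small sketch of the distinction, assuming the usual ggml context setup (shapes and buffer size are arbitrary):

#include <stdio.h>
#include "ggml.h"

int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 8); // densely packed
    struct ggml_tensor * p = ggml_permute(ctx, a, 1, 0, 2, 3);             // same memory, swapped strides

    printf("a: contiguous=%d contiguously_allocated=%d\n",
           ggml_is_contiguous(a), ggml_is_contiguously_allocated(a));      // 1 1
    printf("p: contiguous=%d contiguously_allocated=%d\n",
           ggml_is_contiguous(p), ggml_is_contiguously_allocated(p));      // 0 1: permuted, but no gaps

    ggml_free(ctx);
    return 0;
}

This is the property the CUDA changes below rely on: converting K/V to F16 over ggml_nelements() only covers every element when the source tensor is allocated without gaps, hence the new asserts.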
ggml/src/ggml-backend.cpp

Lines changed: 2 additions & 2 deletions
@@ -56,7 +56,7 @@ size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) {
     return SIZE_MAX;
 }
 
-size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor) {
+size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
     // get_alloc_size is optional, defaults to ggml_nbytes
     if (buft->iface.get_alloc_size) {
         size_t size = buft->iface.get_alloc_size(buft, tensor);
@@ -152,7 +152,7 @@ size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) {
     return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer));
 }
 
-size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor) {
     return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor);
 }
 
ggml/src/ggml-cuda/fattn-common.cuh

Lines changed: 2 additions & 0 deletions
@@ -719,6 +719,7 @@ void launch_fattn(
     size_t nb23 = V->nb[3];
 
     if (need_f16_K && K->type != GGML_TYPE_F16) {
+        GGML_ASSERT(ggml_is_contiguously_allocated(K));
         K_f16.alloc(ggml_nelements(K));
         to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(K->type);
         to_fp16(K_data, K_f16.ptr, ggml_nelements(K), main_stream);
@@ -733,6 +734,7 @@ void launch_fattn(
     }
 
     if (need_f16_V && V->type != GGML_TYPE_F16) {
+        GGML_ASSERT(ggml_is_contiguously_allocated(V));
         V_f16.alloc(ggml_nelements(V));
         to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(V->type);
         to_fp16(V_data, V_f16.ptr, ggml_nelements(V), main_stream);

ggml/src/ggml-cuda/ggml-cuda.cu

Lines changed: 14 additions & 5 deletions
@@ -555,8 +555,8 @@ static enum ggml_status ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer
 
     if (ggml_is_quantized(tensor->type) && tensor->view_src == nullptr && ggml_backend_buffer_get_usage(buffer) != GGML_BACKEND_BUFFER_USAGE_COMPUTE) {
         // initialize padding to 0 to avoid possible NaN values
-        size_t original_size = ggml_nbytes(tensor);
-        size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor);
+        const size_t original_size = ggml_nbytes(tensor);
+        const size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor);
 
         if (padded_size > original_size) {
             ggml_cuda_set_device(ctx->device);
@@ -679,6 +679,7 @@ static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_t
 
     if (ggml_is_quantized(tensor->type)) {
         if (ne0 % MATRIX_ROW_PADDING != 0) {
+            GGML_ASSERT(tensor->nb[0] == ggml_element_size(tensor));
             size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
         }
     }
@@ -800,6 +801,7 @@ static void * ggml_backend_cuda_split_buffer_get_base(ggml_backend_buffer_t buff
 
 static enum ggml_status ggml_backend_cuda_split_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
     GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported
+    GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors");
 
     ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context;
     ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context;
@@ -851,6 +853,7 @@ static void ggml_backend_cuda_split_buffer_set_tensor(ggml_backend_buffer_t buff
     // split tensors must always be set in their entirety at once
     GGML_ASSERT(offset == 0);
     GGML_ASSERT(size == ggml_nbytes(tensor));
+    GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors");
 
     ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context;
 
@@ -889,6 +892,7 @@ static void ggml_backend_cuda_split_buffer_get_tensor(ggml_backend_buffer_t buff
     // split tensors must always be set in their entirety at once
     GGML_ASSERT(offset == 0);
     GGML_ASSERT(size == ggml_nbytes(tensor));
+    GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors");
 
     ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context;
 
@@ -970,6 +974,7 @@ static size_t ggml_backend_cuda_split_buffer_type_get_alignment(ggml_backend_buf
 
 static size_t ggml_backend_cuda_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
     ggml_backend_cuda_split_buffer_type_context * ctx = (ggml_backend_cuda_split_buffer_type_context *)buft->context;
+    GGML_ASSERT(ggml_is_contiguous(tensor) && "split buffers only supported for contiguous tensors");
 
     size_t total_size = 0;
 
@@ -1531,6 +1536,8 @@ static void ggml_cuda_op_mul_mat(
 
     // If src0 is on a temporary compute buffer (partial offloading) there may be some padding that needs to be cleared:
     if (ne00 % MATRIX_ROW_PADDING != 0 && ggml_is_quantized(src0->type) && ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && src0->view_src == nullptr) {
+        GGML_ASSERT(ggml_is_contiguously_allocated(src0));
+        GGML_ASSERT(!src0->view_src);
         const size_t nbytes_data = ggml_row_size(src0->type, (dev[id].row_high - dev[id].row_low)*ne00);
         const size_t nbytes_padding = ggml_row_size(src0->type, MATRIX_ROW_PADDING - ne00 % MATRIX_ROW_PADDING);
         CUDA_CHECK(cudaMemsetAsync(dev[id].src0_dd + nbytes_data, 0, nbytes_padding, stream));
@@ -2062,9 +2069,11 @@ static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor *
     }
 
     ggml_tensor src0_slice = *src0;
-    src0_slice.ne[2] = 1;
-    src0_slice.nb[3] = src0_slice.nb[2];
-    src0_slice.data = (char *) src0->data + i02*nb02;
+    src0_slice.ne[2]    = 1;
+    src0_slice.nb[3]    = src0_slice.nb[2];
+    src0_slice.op       = GGML_OP_VIEW;
+    src0_slice.view_src = dst->src[0]; // non-const pointer to src0
+    src0_slice.data     = (char *) src0->data + i02*nb02;
 
     ggml_tensor src1_slice;
     memset(&src1_slice, 0, sizeof(src1_slice));