 #include "cc_buffer_mgr.h"
 #include "cc_lli_defs.h"
 #include "cc_cipher.h"
+#include "cc_hash.h"
 
 enum dma_buffer_type {
         DMA_NULL_TYPE = -1,
@@ -348,9 +349,33 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
         return 0;
 }
 
+static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
+                           u8 *curr_buff, u32 curr_buff_cnt,
+                           struct buffer_array *sg_data)
+{
+        dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
+        /* create sg for the current buffer */
+        sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
+        if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
+                dev_err(dev, "dma_map_sg() src buffer failed\n");
+                return -ENOMEM;
+        }
+        dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
+                &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
+                sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
+                areq_ctx->buff_sg->length);
+        areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+        areq_ctx->curr_sg = areq_ctx->buff_sg;
+        areq_ctx->in_nents = 0;
+        /* prepare for case of MLLI */
+        cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
+                        false, NULL);
+        return 0;
+}
+
 void cc_unmap_cipher_request(struct device *dev, void *ctx,
-                                unsigned int ivsize, struct scatterlist *src,
-                                struct scatterlist *dst)
+                             unsigned int ivsize, struct scatterlist *src,
+                             struct scatterlist *dst)
 {
         struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
 
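Editor's note: cc_set_hash_buf(), added in the hunk above, is what lets the driver's internal partial-block buffer travel the same DMA path as caller-supplied data: it wraps the buffer in a one-entry scatterlist, maps it for device reads, marks the request as DLLI, and still records the entry in sg_data so an MLLI table can be built later if more data arrives. A minimal sketch of that wrap-and-map pattern follows; stage_buf_for_dma() is a hypothetical name used only for illustration, not a function in this driver.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch only: expose a driver-internal buffer to the DMA API by wrapping
 * it in a single-entry scatterlist, the same pattern cc_set_hash_buf() uses.
 */
static int stage_buf_for_dma(struct device *dev, struct scatterlist *sg,
                             u8 *buf, unsigned int len)
{
        sg_init_one(sg, buf, len);      /* one-entry sg table over buf */

        /* dma_map_sg() returns the number of entries mapped; 0 means failure */
        if (dma_map_sg(dev, sg, 1, DMA_TO_DEVICE) != 1)
                return -ENOMEM;

        return 0;
}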
@@ -472,6 +497,238 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
         return rc;
 }
 
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
+                              struct scatterlist *src, unsigned int nbytes,
+                              bool do_update, gfp_t flags)
+{
+        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+        struct device *dev = drvdata_to_dev(drvdata);
+        u8 *curr_buff = cc_hash_buf(areq_ctx);
+        u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+        struct buffer_array sg_data;
+        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+        u32 dummy = 0;
+        u32 mapped_nents = 0;
+
+        dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
+                curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+        /* Init the type of the dma buffer */
+        areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+        mlli_params->curr_pool = NULL;
+        sg_data.num_of_buffers = 0;
+        areq_ctx->in_nents = 0;
+
+        if (nbytes == 0 && *curr_buff_cnt == 0) {
+                /* nothing to do */
+                return 0;
+        }
+
+        /*TODO: copy data in case that buffer is enough for operation */
+        /* map the previous buffer */
+        if (*curr_buff_cnt) {
+                if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+                                    &sg_data)) {
+                        return -ENOMEM;
+                }
+        }
+
+        if (src && nbytes > 0 && do_update) {
+                if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
+                              &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+                              &dummy, &mapped_nents)) {
+                        goto unmap_curr_buff;
+                }
+                if (src && mapped_nents == 1 &&
+                    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+                        memcpy(areq_ctx->buff_sg, src,
+                               sizeof(struct scatterlist));
+                        areq_ctx->buff_sg->length = nbytes;
+                        areq_ctx->curr_sg = areq_ctx->buff_sg;
+                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+                } else {
+                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+                }
+        }
+
+        /*build mlli */
+        if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+                /* add the src data to the sg_data */
+                cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
+                                0, true, &areq_ctx->mlli_nents);
+                if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+                        goto fail_unmap_din;
+        }
+        /* change the buffer index for the unmap function */
+        areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
+        dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
+                cc_dma_buf_type(areq_ctx->data_dma_buf_type));
+        return 0;
+
+fail_unmap_din:
+        dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+        if (*curr_buff_cnt)
+                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+        return -ENOMEM;
+}
+
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
+                               struct scatterlist *src, unsigned int nbytes,
+                               unsigned int block_size, gfp_t flags)
+{
+        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+        struct device *dev = drvdata_to_dev(drvdata);
+        u8 *curr_buff = cc_hash_buf(areq_ctx);
+        u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
+        u8 *next_buff = cc_next_buf(areq_ctx);
+        u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
+        struct mlli_params *mlli_params = &areq_ctx->mlli_params;
+        unsigned int update_data_len;
+        u32 total_in_len = nbytes + *curr_buff_cnt;
+        struct buffer_array sg_data;
+        struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+        unsigned int swap_index = 0;
+        u32 dummy = 0;
+        u32 mapped_nents = 0;
+
+        dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
+                curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
+        /* Init the type of the dma buffer */
+        areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
+        mlli_params->curr_pool = NULL;
+        areq_ctx->curr_sg = NULL;
+        sg_data.num_of_buffers = 0;
+        areq_ctx->in_nents = 0;
+
+        if (total_in_len < block_size) {
+                dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
+                        curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
+                areq_ctx->in_nents =
+                        cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+                sg_copy_to_buffer(src, areq_ctx->in_nents,
+                                  &curr_buff[*curr_buff_cnt], nbytes);
+                *curr_buff_cnt += nbytes;
+                return 1;
+        }
+
+        /* Calculate the residue size*/
+        *next_buff_cnt = total_in_len & (block_size - 1);
+        /* update data len */
+        update_data_len = total_in_len - *next_buff_cnt;
+
+        dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
+                *next_buff_cnt, update_data_len);
+
+        /* Copy the new residue to next buffer */
+        if (*next_buff_cnt) {
+                dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
+                        next_buff, (update_data_len - *curr_buff_cnt),
+                        *next_buff_cnt);
+                cc_copy_sg_portion(dev, next_buff, src,
+                                   (update_data_len - *curr_buff_cnt),
+                                   nbytes, CC_SG_TO_BUF);
+                /* change the buffer index for next operation */
+                swap_index = 1;
+        }
+
+        if (*curr_buff_cnt) {
+                if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
+                                    &sg_data)) {
+                        return -ENOMEM;
+                }
+                /* change the buffer index for next operation */
+                swap_index = 1;
+        }
+
+        if (update_data_len > *curr_buff_cnt) {
+                if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
+                              DMA_TO_DEVICE, &areq_ctx->in_nents,
+                              LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
+                              &mapped_nents)) {
+                        goto unmap_curr_buff;
+                }
+                if (mapped_nents == 1 &&
+                    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
+                        /* only one entry in the SG and no previous data */
+                        memcpy(areq_ctx->buff_sg, src,
+                               sizeof(struct scatterlist));
+                        areq_ctx->buff_sg->length = update_data_len;
+                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
+                        areq_ctx->curr_sg = areq_ctx->buff_sg;
+                } else {
+                        areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
+                }
+        }
+
+        if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
+                mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+                /* add the src data to the sg_data */
+                cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
+                                (update_data_len - *curr_buff_cnt), 0, true,
+                                &areq_ctx->mlli_nents);
+                if (cc_generate_mlli(dev, &sg_data, mlli_params, flags))
+                        goto fail_unmap_din;
+        }
+        areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
+
+        return 0;
+
+fail_unmap_din:
+        dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
+
+unmap_curr_buff:
+        if (*curr_buff_cnt)
+                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+
+        return -ENOMEM;
+}
+
+void cc_unmap_hash_request(struct device *dev, void *ctx,
+                           struct scatterlist *src, bool do_revert)
+{
+        struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
+        u32 *prev_len = cc_next_buf_cnt(areq_ctx);
+
+        /*In case a pool was set, a table was
+         *allocated and should be released
+         */
+        if (areq_ctx->mlli_params.curr_pool) {
+                dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
+                        &areq_ctx->mlli_params.mlli_dma_addr,
+                        areq_ctx->mlli_params.mlli_virt_addr);
+                dma_pool_free(areq_ctx->mlli_params.curr_pool,
+                              areq_ctx->mlli_params.mlli_virt_addr,
+                              areq_ctx->mlli_params.mlli_dma_addr);
+        }
+
+        if (src && areq_ctx->in_nents) {
+                dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
+                        sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
+                dma_unmap_sg(dev, src,
+                             areq_ctx->in_nents, DMA_TO_DEVICE);
+        }
+
+        if (*prev_len) {
+                dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
+                        sg_virt(areq_ctx->buff_sg),
+                        &sg_dma_address(areq_ctx->buff_sg),
+                        sg_dma_len(areq_ctx->buff_sg));
+                dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
+                if (!do_revert) {
+                        /* clean the previous data length for update
+                         * operation
+                         */
+                        *prev_len = 0;
+                } else {
+                        areq_ctx->buff_index ^= 1;
+                }
+        }
+}
+
 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
 {
         struct buff_mgr_handle *buff_mgr_handle;