@@ -1895,30 +1895,31 @@ int Dbtup::handleReadReq(Signal* signal,
 }
 
 static
-void
-handle_reorg(Dbtup::KeyReqStruct * req_struct,
-             Dbtup::Fragrecord::FragState state)
+Uint32
+get_reorg_flag(Dbtup::KeyReqStruct * req_struct,
+               Dbtup::Fragrecord::FragState state)
 {
   Uint32 reorg = req_struct->m_reorg;
   switch (state){
   case Dbtup::Fragrecord::FS_FREE:
   case Dbtup::Fragrecord::FS_REORG_NEW:
   case Dbtup::Fragrecord::FS_REORG_COMMIT_NEW:
   case Dbtup::Fragrecord::FS_REORG_COMPLETE_NEW:
-    return;
+    return 0;
   case Dbtup::Fragrecord::FS_REORG_COMMIT:
   case Dbtup::Fragrecord::FS_REORG_COMPLETE:
     if (reorg != ScanFragReq::REORG_NOT_MOVED)
-      return;
+      return 0;
     break;
   case Dbtup::Fragrecord::FS_ONLINE:
     if (reorg != ScanFragReq::REORG_MOVED)
-      return;
+      return 0;
     break;
   default:
-    return;
+      return 0;
   }
-  req_struct->m_tuple_ptr->m_header_bits |= Dbtup::Tuple_header::REORG_MOVE;
+
+  return Dbtup::Tuple_header::REORG_MOVE;
 }
 
 /* ---------------------------------------------------------------- */
@@ -2093,7 +2094,8 @@ int Dbtup::handleUpdateReq(Signal* signal,
 
   if (req_struct->m_reorg != ScanFragReq::REORG_ALL)
   {
-    handle_reorg(req_struct, regFragPtr->fragStatus);
+    req_struct->m_tuple_ptr->m_header_bits |=
+      get_reorg_flag(req_struct, regFragPtr->fragStatus);
   }
 
   req_struct->m_tuple_ptr->set_tuple_version(tup_version);
@@ -2909,6 +2911,13 @@ int Dbtup::handleInsertReq(Signal* signal,
 
       disk_ptr->set_base_record_ref(ref);
     }
+
+    if (req_struct->m_reorg != ScanFragReq::REORG_ALL)
+    {
+      req_struct->m_tuple_ptr->m_header_bits |=
+        get_reorg_flag(req_struct, regFragPtr->fragStatus);
+    }
+
     setChecksum(req_struct->m_tuple_ptr, regTabPtr);
     /**
      * At this point we hold the fragment mutex to ensure that TUP scans
@@ -2924,6 +2933,7 @@ int Dbtup::handleInsertReq(Signal* signal,
   }
   else
   {
+    /* !mem_insert */
     if (ERROR_INSERTED(4020))
     {
       c_lqh->upgrade_to_exclusive_frag_access();
@@ -2947,11 +2957,23 @@ int Dbtup::handleInsertReq(Signal* signal,
      * finalizing the writes on the row.
      */
     m_base_header_bits &= ~(Uint32)Tuple_header::FREE;
-  }
 
-  if (req_struct->m_reorg != ScanFragReq::REORG_ALL)
-  {
-    handle_reorg(req_struct, regFragPtr->fragStatus);
+    if (req_struct->m_reorg != ScanFragReq::REORG_ALL)
+    {
+      m_base_header_bits |=
+        get_reorg_flag(req_struct, regFragPtr->fragStatus);
+    }
+
+    /**
+     * Fragment-locking:
+     * No need to protect this checksum write, we only perform it here for
+     * non-first inserts since first insert operations are handled above
+     * while holding the mutex. For non-first operations the row is not
+     * visible to other threads at this time, copy rows are not even visible to
+     * TUP scans, thus no need to protect it here. The row becomes visible
+     * when inserted into the active list after returning from this call.
+     */
+    setChecksum(req_struct->m_tuple_ptr, regTabPtr);
   }
 
   /* Have been successful with disk + mem, update ACC to point to
@@ -2974,19 +2996,7 @@ int Dbtup::handleInsertReq(Signal* signal,
   {
     *accminupdateptr = 0; // No accminupdate should be performed
   }
-  if (!mem_insert)
-  {
-    /**
-     * No need to protect this checksum write, we only perform it here for
-     * non-first inserts since first insert operations are handled above
-     * while holding the mutex. For non-first operations the row is not
-     * visible to others at this time, copy rows are not even visible to
-     * TUP scans, thus no need to protect it here. The row becomes visible
-     * when inserted into the active list after returning from this call.
-     */
-    jamDebug();
-    setChecksum(req_struct->m_tuple_ptr, regTabPtr);
-  }
+
   set_tuple_state(regOperPtr.p, TUPLE_PREPARED);
   return 0;
 
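
The diff turns handle_reorg() from a side-effecting helper into get_reorg_flag(), which returns the REORG_MOVE bit (or 0) so each caller can fold it into the appropriate header word before setChecksum() runs, and the checksum therefore covers the reorg flag. Below is a minimal standalone sketch of that ordering; FragState, ReorgScan, REORG_MOVE and checksum() are simplified stand-ins for illustration, not the real NDB definitions.

// Sketch of the "return the flag, let the caller OR it in" pattern.
// All names here are simplified stand-ins, not the NDB types.
#include <cstdint>
#include <cstdio>

enum class FragState { Online, ReorgCommit, ReorgNew };
enum class ReorgScan { All, Moved, NotMoved };

static const uint32_t REORG_MOVE = 0x1;  // illustrative header bit

// Pure helper: reports which bit (if any) the caller should set.
static uint32_t get_reorg_flag(ReorgScan reorg, FragState state)
{
  switch (state)
  {
  case FragState::ReorgNew:
    return 0;
  case FragState::ReorgCommit:
    return (reorg == ReorgScan::NotMoved) ? REORG_MOVE : 0;
  case FragState::Online:
    return (reorg == ReorgScan::Moved) ? REORG_MOVE : 0;
  }
  return 0;
}

// Placeholder for setChecksum(): it must see the final header bits.
static uint32_t checksum(uint32_t header_bits)
{
  return header_bits ^ 0x5a5a5a5au;
}

int main()
{
  uint32_t header_bits = 0;
  ReorgScan reorg = ReorgScan::Moved;

  // Fold the reorg flag into the header bits first...
  if (reorg != ReorgScan::All)
    header_bits |= get_reorg_flag(reorg, FragState::Online);

  // ...so the checksum computed afterwards covers it.
  std::printf("header_bits=0x%x checksum=0x%x\n",
              (unsigned)header_bits, (unsigned)checksum(header_bits));
  return 0;
}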