SCN : Blog List - ABAP Development

Purchase order and sales document maintenance in dialog mode


Sometimes the task is not to create the order through a BAPI, because in that case it is saved immediately if no error occurs. Instead, you may need to jump into the standard transaction, passing in some header information and items. Here are program fragments that make this easy:

 

1) Purchase Order.

  Imagine you have an ALV report with output table gt_outtab and columns banfn and bnfpo (purchase requisition number and item number).

 

  DATA : header          LIKE mepoheader,
         mepo_doc        TYPE mepo_document,
         requisitions    TYPE mereq_t_eban_mem,
         wa_requisitions LIKE LINE OF requisitions,
         items           LIKE mepoitem OCCURS 0 WITH HEADER LINE.
  DATA : lr_selections   TYPE REF TO cl_salv_selections,
         lt_rows         TYPE salv_t_row,
         ls_row          LIKE LINE OF lt_rows.

  lr_selections = gr_table->get_selections( ).
  lt_rows       = lr_selections->get_selected_rows( ).

  IF lt_rows IS INITIAL.
    MESSAGE s001 WITH 'select any line'.
    EXIT.
  ENDIF.

  CLEAR : items, items[].
  LOOP AT lt_rows INTO ls_row.
    READ TABLE gt_outtab INDEX ls_row.
    CHECK gt_outtab-ebeln IS INITIAL.
    ADD 10 TO items-ebelp.
    items-banfn = gt_outtab-banfn.
    items-bnfpo = gt_outtab-bnfpo.
    APPEND items.
  ENDLOOP.

* call ME21N
  CALL FUNCTION 'WB2_PO_PROCESS_START'
    EXPORTING
      im_ekko               = header
      im_aktyp              = 'H'
    IMPORTING
      ch_requisitions       = requisitions
    TABLES
      cht_items             = items
    CHANGING
      ch_document           = mepo_doc
    EXCEPTIONS
      invalid_call          = 1
      invalid_activity_type = 2
      done                  = 3
      error                 = 4
      OTHERS                = 5.

  IF sy-subrc <> 0.
*   Implement suitable error handling here
  ENDIF.

 

2) Sales order based on a previous sales document. vbeln is the number of the previous sales document.

  SELECT SINGLE * FROM vbak INTO wa_vbak
         WHERE vbeln = vbeln.

  CALL FUNCTION 'Z_MAP_VBAKKOM_2_HDRIN' " code of FM below
    EXPORTING
      i_vbakkom       = vbakkom
    IMPORTING
      order_header_in = sales_header_in.

  CLEAR : sales_partners, sales_partners[].

  DATA t_vbpa LIKE vbpa OCCURS 0 WITH HEADER LINE.
  SELECT * FROM vbpa INTO TABLE t_vbpa WHERE vbeln = wa_vbak-vbeln.
  LOOP AT t_vbpa.
    sales_partners-partn_role = t_vbpa-parvw.
    IF NOT t_vbpa-kunnr IS INITIAL.
      sales_partners-partn_numb = t_vbpa-kunnr.
    ELSEIF NOT t_vbpa-pernr IS INITIAL.
      sales_partners-partn_numb = t_vbpa-pernr.
    ENDIF.
    sales_partners-itm_number = t_vbpa-posnr.
    APPEND sales_partners.
  ENDLOOP.

* items
  CLEAR : sales_items_in[],      sales_items_in,
          sales_schedules_in[],  sales_schedules_in,
          sales_conditions_in[], sales_conditions_in,
          tab_root_instances[],  root_instances.

  SELECT * FROM vbap INTO wa_vbap WHERE vbeln = vbeln.

    sales_items_in-itm_number = wa_vbap-posnr.
    sales_items_in-material   = wa_vbap-matnr.
    sales_items_in-reason_rej = wa_vbap-abgru.
    sales_items_in-ref_doc    = wa_vbap-vbeln.
    sales_items_in-ref_doc_it = wa_vbap-posnr.
    sales_items_in-ref_doc_ca = wa_vbak-vbtyp.
    sales_items_in-plant      = wa_vbap-werks.
    sales_items_in-store_loc  = wa_vbap-lgort.
    sales_items_in-po_itm_no  = sales_items_in-itm_number.
    IF vbtyp_v = 'B'. " next doc is contract
      sales_items_in-target_qty = wa_vbap-orfmng.
    ELSE.
      sales_schedules_in-itm_number = sales_items_in-itm_number.
      sales_schedules_in-sched_line = 1.
      sales_schedules_in-req_qty    = wa_vbap-orfmng.
      APPEND sales_schedules_in.
    ENDIF.
    APPEND sales_items_in.

* conditions
    DATA amount_external LIKE bapicurr-bapicurr.
    SELECT * FROM konv INTO wa_konv
             WHERE knumv = wa_vbak-knumv
               AND kposn = wa_vbap-posnr
               AND kherk = 'C'.    " manual conditions
      IF wa_konv-krech = 'A'.
        wa_konv-kbetr = wa_konv-kbetr / 10.
      ENDIF.
      MOVE-CORRESPONDING wa_konv TO komv.
      CALL FUNCTION 'MAP2E_KOMV_TO_BAPISDCOND'
        EXPORTING
          komv       = komv
        CHANGING
          bapisdcond = bapisdcond.

      MOVE-CORRESPONDING bapisdcond TO sales_conditions_in.
      IF bapisdcond-currency IS NOT INITIAL.
        CALL FUNCTION 'BAPI_CURRENCY_CONV_TO_EXTERNAL'
          EXPORTING
            currency        = bapisdcond-currency
            amount_internal = bapisdcond-cond_value
          IMPORTING
            amount_external = amount_external.
        sales_conditions_in-cond_value = amount_external.
      ENDIF.

      sales_conditions_in-itm_number = sales_items_in-itm_number.
      APPEND sales_conditions_in.
    ENDSELECT.

    IF tc = 'X'.   " configuration if vehicle; variable tc is a flag - material is a vehicle
      root_instances-instance_no = wa_vbap-cuobj.
      root_instances-posex       = sales_items_in-po_itm_no.
      APPEND root_instances TO tab_root_instances.
    ENDIF.

  ENDSELECT.

  IF tc = 'X'.   " read configuration of the previous doc. if it is a vehicle
    CALL FUNCTION 'CUXI_GET_MULTI_CONFIGURATION'
      EXPORTING
        i_tab_root_instances         = tab_root_instances
      TABLES
        e_tab_cfg_headers            = e_tab_cfg_headers
        e_tab_instances              = e_tab_instances
        e_tab_part_of                = e_tab_part_of
        e_tab_values                 = e_tab_values
        e_tab_var_keys               = e_tab_var_keys
      EXCEPTIONS
        invalid_instance             = 1
        instance_is_a_classification = 2
        no_root_instance             = 3
        internal_error               = 4
        invalid_data                 = 5
        OTHERS                       = 6.

** clear some fields
    LOOP AT e_tab_cfg_headers INTO cfg_headers.
      CLEAR : cfg_headers-sce,
              cfg_headers-kbname,
              cfg_headers-kbversion,
              cfg_headers-complete,
              cfg_headers-consistent,
              cfg_headers-cfginfo,
              cfg_headers-kbprofile,
              cfg_headers-kblanguage,
              cfg_headers-cbase_id,
              cfg_headers-cbase_id_type.
      MODIFY e_tab_cfg_headers FROM cfg_headers.
    ENDLOOP.

    LOOP AT e_tab_instances INTO instances.
      CLEAR : instances-obj_txt,
              instances-quantity,
              instances-author,
              instances-quantity_unit,
              instances-complete,
              instances-consistent,
              instances-object_guid,
              instances-persist_id,
              instances-persist_id_type.
      MODIFY e_tab_instances FROM instances.
    ENDLOOP.

    LOOP AT e_tab_values INTO values.
      CLEAR values-valcode.
      MODIFY e_tab_values FROM values.
    ENDLOOP.
  ENDIF.

* header conditions
  SELECT * FROM konv INTO wa_konv
           WHERE knumv = wa_vbak-knumv
             AND kposn = '000000'
             AND kherk = 'C'.    " manual
    IF wa_konv-krech = 'A'.
      wa_konv-kbetr = wa_konv-kbetr / 10.
    ENDIF.
    MOVE-CORRESPONDING wa_konv TO komv.
    CALL FUNCTION 'MAP2E_KOMV_TO_BAPISDCOND'
      EXPORTING
        komv       = komv
      CHANGING
        bapisdcond = bapisdcond.

    MOVE-CORRESPONDING bapisdcond TO sales_conditions_in.
    sales_conditions_in-itm_number = '000000'.
    APPEND sales_conditions_in.
  ENDSELECT.

  CALL FUNCTION 'BAPI_SALESDOCU_CREATEWITHDIA'
    EXPORTING
      sales_header_in     = sales_header_in
      synchronous         = 'X'
    IMPORTING
      salesdocument_ex    = vbeln_so
    TABLES
      return              = return
      sales_items_in      = sales_items_in
      sales_partners      = sales_partners
      sales_schedules_in  = sales_schedules_in
      sales_conditions_in = sales_conditions_in
      sales_cfgs_ref      = e_tab_cfg_headers
      sales_cfgs_inst     = e_tab_instances
      sales_cfgs_value    = e_tab_values.

 

 

FUNCTION z_map_vbakkom_2_hdrin.
*"----------------------------------------------------------------------
*"*"Local Interface:
*"  IMPORTING
*"     REFERENCE(I_VBAKKOM) TYPE  VBAKKOM
*"  EXPORTING
*"     REFERENCE(ORDER_HEADER_IN) TYPE  BAPISDHEAD1
*"----------------------------------------------------------------------
  MOVE i_vbakkom-auart      TO order_header_in-doc_type.
  MOVE i_vbakkom-submi      TO order_header_in-collect_no.
  MOVE i_vbakkom-vkorg      TO order_header_in-sales_org.
  MOVE i_vbakkom-vtweg      TO order_header_in-distr_chan.
  MOVE i_vbakkom-spart      TO order_header_in-division.
  MOVE i_vbakkom-vkgrp      TO order_header_in-sales_grp.
  MOVE i_vbakkom-vkbur      TO order_header_in-sales_off.
  MOVE i_vbakkom-vdatu      TO order_header_in-req_date_h.
  MOVE i_vbakkom-vprgr      TO order_header_in-date_type.
  MOVE i_vbakkom-bsark      TO order_header_in-po_method.
  MOVE i_vbakkom-bstdk      TO order_header_in-purch_date.
  MOVE i_vbakkom-bstzd      TO order_header_in-po_supplem.
  MOVE i_vbakkom-ihrez      TO order_header_in-ref_1.
  MOVE i_vbakkom-bname      TO order_header_in-name.
  MOVE i_vbakkom-telf1      TO order_header_in-telephone.
  MOVE i_vbakkom-konda      TO order_header_in-price_grp.
  MOVE i_vbakkom-kdgrp      TO order_header_in-cust_group.
  MOVE i_vbakkom-bzirk      TO order_header_in-sales_dist.
  MOVE i_vbakkom-pltyp      TO order_header_in-price_list.
  MOVE i_vbakkom-inco1      TO order_header_in-incoterms1.
  MOVE i_vbakkom-inco2      TO order_header_in-incoterms2.
  MOVE i_vbakkom-zterm      TO order_header_in-pmnttrms.
  MOVE i_vbakkom-lifsk      TO order_header_in-dlv_block.
  MOVE i_vbakkom-faksk      TO order_header_in-bill_block.
  MOVE i_vbakkom-augru      TO order_header_in-ord_reason.
  MOVE i_vbakkom-autlf      TO order_header_in-compl_dlv.
  MOVE i_vbakkom-prsdt      TO order_header_in-price_date.
  MOVE i_vbakkom-angdt      TO order_header_in-qt_valid_f.
  MOVE i_vbakkom-bnddt      TO order_header_in-qt_valid_t.
  MOVE i_vbakkom-guebg      TO order_header_in-ct_valid_f.
  MOVE i_vbakkom-gueen      TO order_header_in-ct_valid_t.
  MOVE i_vbakkom-kvgr1      TO order_header_in-cust_grp1.
  MOVE i_vbakkom-kvgr2      TO order_header_in-cust_grp2.
  MOVE i_vbakkom-kvgr3      TO order_header_in-cust_grp3.
  MOVE i_vbakkom-kvgr4      TO order_header_in-cust_grp4.
  MOVE i_vbakkom-kvgr5      TO order_header_in-cust_grp5.
  MOVE i_vbakkom-bstkd      TO order_header_in-purch_no_c.
  MOVE i_vbakkom-bstkd_e    TO order_header_in-purch_no_s.
  MOVE i_vbakkom-bstdk_e    TO order_header_in-po_dat_s.
  MOVE i_vbakkom-bsark_e    TO order_header_in-po_meth_s.
  MOVE i_vbakkom-ihrez_e    TO order_header_in-ref_1_s.
  MOVE i_vbakkom-audat      TO order_header_in-doc_date.
  MOVE i_vbakkom-gwldt      TO order_header_in-war_date.
  MOVE i_vbakkom-vsbed      TO order_header_in-ship_cond.
  MOVE i_vbakkom-ktext      TO order_header_in-pp_search.
  MOVE i_vbakkom-mahza      TO order_header_in-dun_count.
  MOVE i_vbakkom-mahdt      TO order_header_in-dun_date.
  MOVE i_vbakkom-abrvw      TO order_header_in-dlvscheduse.
  MOVE i_vbakkom-abdis      TO order_header_in-plan_dlv_schtype.
  MOVE i_vbakkom-vgbel      TO order_header_in-ref_doc.
  MOVE i_vbakkom-bukrs_vf   TO order_header_in-comp_code_b.
  MOVE i_vbakkom-taxk1      TO order_header_in-alt_tax_class.
  MOVE i_vbakkom-taxk2      TO order_header_in-tax_class2.
  MOVE i_vbakkom-taxk3      TO order_header_in-tax_class3.
  MOVE i_vbakkom-taxk4      TO order_header_in-tax_class4.
  MOVE i_vbakkom-taxk5      TO order_header_in-tax_class5.
  MOVE i_vbakkom-taxk6      TO order_header_in-tax_class6.
  MOVE i_vbakkom-taxk7      TO order_header_in-tax_class7.
  MOVE i_vbakkom-taxk8      TO order_header_in-tax_class8.
  MOVE i_vbakkom-taxk9      TO order_header_in-tax_class9.
  MOVE i_vbakkom-xblnr      TO order_header_in-ref_doc_l.
  MOVE i_vbakkom-zuonr      TO order_header_in-***_number.
  MOVE i_vbakkom-vgtyp      TO order_header_in-ref_doc_cat.
  MOVE i_vbakkom-kzazu      TO order_header_in-ord_comb_in.
  MOVE i_vbakkom-perfk      TO order_header_in-bill_sched.
  MOVE i_vbakkom-perrl      TO order_header_in-invo_sched.
  MOVE i_vbakkom-mrnkz      TO order_header_in-mn_invoice.
  MOVE i_vbakkom-kurrf      TO order_header_in-exch_rate_fi.
  MOVE i_vbakkom-valtg      TO order_header_in-add_val_dy.
  MOVE i_vbakkom-valdt      TO order_header_in-fix_val_dy.
  MOVE i_vbakkom-zlsch      TO order_header_in-pymt_meth.
  MOVE i_vbakkom-ktgrd      TO order_header_in-accnt_asgn.
  MOVE i_vbakkom-kursk      TO order_header_in-exchg_rate.
  MOVE i_vbakkom-fkdat      TO order_header_in-bill_date.
  MOVE i_vbakkom-fbuda      TO order_header_in-serv_date.
  MOVE i_vbakkom-mschl      TO order_header_in-dunn_key.
  MOVE i_vbakkom-mansp      TO order_header_in-dunn_block.
  MOVE i_vbakkom-abssc      TO order_header_in-pymt_gar_proc.
  MOVE i_vbakkom-abtnr      TO order_header_in-department_no.
  MOVE i_vbakkom-empst      TO order_header_in-rec_point.
  MOVE i_vbakkom-lcnum      TO order_header_in-doc_num_fi.
  MOVE i_vbakkom-kdkg1      TO order_header_in-cust_cond_grp1.
  MOVE i_vbakkom-kdkg2      TO order_header_in-cust_cond_grp2.
  MOVE i_vbakkom-kdkg3      TO order_header_in-cust_cond_grp3.
  MOVE i_vbakkom-kdkg4      TO order_header_in-cust_cond_grp4.
  MOVE i_vbakkom-kdkg5      TO order_header_in-cust_cond_grp5.
  MOVE i_vbakkom-delco      TO order_header_in-dlv_time.
  MOVE i_vbakkom-waerk      TO order_header_in-currency.
  MOVE i_vbakkom-ernam      TO order_header_in-created_by.
  MOVE i_vbakkom-landtx     TO order_header_in-tax_depart_cty.
  MOVE i_vbakkom-stceg_l    TO order_header_in-tax_dest_cty.
  MOVE i_vbakkom-xegdr      TO order_header_in-eu_triang_deal.
  MOVE i_vbakkom-vbeln_grp  TO order_header_in-master_contr.
  MOVE i_vbakkom-scheme_grp TO order_header_in-ref_proc.
  MOVE i_vbakkom-abruf_part TO order_header_in-check_partn_auth.
  MOVE i_vbakkom-dat_fzau   TO order_header_in-cml_qty_date.
  MOVE i_vbakkom-vsnmr_v    TO order_header_in-version.
  MOVE i_vbakkom-qmnum      TO order_header_in-notif_no.
  MOVE i_vbakkom-vkont      TO order_header_in-fkk_conacct.

ENDFUNCTION.


First real use of secondary indexes on an internal table


Introduction

 

Given the reluctance of the general ABAP community to use new-fangled (that is, over fifteen years old) concepts like SORTED and HASHED tables, I was hesitant to write about something a bit newer, but then I thought - what the heck, perhaps some people will find it an encouragement to use new stuff!

 

And I know this isn't that new!

 

So, we have HASHED tables, where the key is unique and the lookup time is constant for each record, and SORTED tables which mean we don't need BINARY SEARCH any more (except if we need to sort descending...). For these tables, there's an index already defined to speed things up - but it's like a database table with just a primary index. Secondary keys are like additional indexes on database tables - but for internal tables.
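To make that concrete, here is a minimal sketch of the two keyed table kinds and the reads they speed up (the flight-like structure is invented for illustration, it's not from this post):

TYPES: BEGIN OF ty_flight,
         carrid TYPE c LENGTH 3,
         connid TYPE n LENGTH 4,
         price  TYPE p LENGTH 8 DECIMALS 2,
       END OF ty_flight.

DATA: "unique key, constant-time lookup
      gt_hashed TYPE HASHED TABLE OF ty_flight
                WITH UNIQUE KEY carrid connid,
      "sorted by key, no BINARY SEARCH needed
      gt_sorted TYPE SORTED TABLE OF ty_flight
                WITH NON-UNIQUE KEY carrid.

DATA ls_flight TYPE ty_flight.

"key access uses the table's built-in index automatically
READ TABLE gt_hashed INTO ls_flight
     WITH TABLE KEY carrid = 'LH' connid = '0400'.
READ TABLE gt_sorted INTO ls_flight
     WITH KEY carrid = 'LH'.   "optimised read, no explicit SORT / BINARY SEARCH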

 

I've heard it said that you should only use these if you've got tables with loads of information in. Well, so long as the data isn't being handled in a loop, I think it doesn't matter. If the data volume being processed is small, a few extra nano-seconds won't matter, and data volumes grow - so there's some future proofing in using the structures which are most efficient with large tables, right from the start.

 

Secondary keys

Here's that syntax, to refresh your memory.

 

TYPES dtype { {TYPE tabkind OF [REF TO] type}

            | {LIKE tabkind OF dobj} }

            [tabkeys]

            [INITIAL SIZE n].

 

And then tabkeys looks like this:

 

... [ WITH key ]
    [ WITH secondary_key1 ] [ WITH secondary_key2 ] ...
    [ {WITH|WITHOUT} FURTHER SECONDARY KEYS ] ... .

 

 

Additions

1. ... WITH FURTHER SECONDARY KEYS

 

2. ... WITHOUT FURTHER SECONDARY KEYS

 

Those additions, we'll forget about. They're for use when you're defining generic table types.

 

Now, for my purposes, I've got a questionnaire, with pages on it, categories of questions and questions. And I need to access it in many ways. So here's how I defined it:

 

TYPES:
  questionnaire_ty TYPE SORTED TABLE OF q_entry_ty WITH NON-UNIQUE KEY page_number cat_seq
                   WITH NON-UNIQUE SORTED KEY by_question COMPONENTS q_id
                   WITH NON-UNIQUE SORTED KEY by_cat_guid COMPONENTS cat_guid q_seq
                   WITH NON-UNIQUE SORTED KEY by_cat_text COMPONENTS cat_text
                   WITH NON-UNIQUE SORTED KEY by_cat_seq  COMPONENTS cat_seq.

 

The idea is that I can access an internal table of this type rapidly by page number, question id, category unique id (guid), category text and category sequence. Seems quite a lot, but the alternatives were to have a standard table and sort it and use binary search for each read, or not bother at all, and just put up with sequential reads.
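For example, a single keyed read then looks like this (a minimal sketch; i_q_id is a hypothetical variable, while the key and component names come from the TYPES definition above):

DATA entry TYPE q_entry_ty.

* read one entry directly via the secondary key by_question
READ TABLE me->questionnaire INTO entry
     WITH KEY by_question COMPONENTS q_id = i_q_id.
IF sy-subrc = 0.
* entry now holds the first row matching that question id
ENDIF.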

 

Some problems

I've got the categories in my questionnaire in sequence order. So, naturally, I want to renumber them. The obvious way of doing this is

 

LOOP AT me->questionnaire ASSIGNING <entry> USING KEY by_cat_guid WHERE cat_guid EQ i_guid.
  ADD 1 TO index.
  <entry>-cat_seq = index.
ENDLOOP.

 

But there's a problem there. It dumps. And it dumps because cat_seq is part of the key by_cat_guid!

 

So, I thought, I'll delete the records, collect them and then insert them afterwards

LOOP AT me->questionnaire INTO entry USING KEY by_cat_guid WHERE cat_guid EQ i_guid.
  DELETE TABLE me->questionnaire FROM entry.
  ADD 1 TO index.
  entry-cat_seq = index.
  INSERT entry INTO TABLE renumbered.
ENDLOOP.
INSERT LINES OF renumbered INTO TABLE me->questionnaire.

 

But data was still going amiss. The problem was that the DELETE command deletes the entry that matches the primary key. So it was reading one entry in the LOOP AT, and deleting an entirely different entry (the one that matched the primary key) at the DELETE.

 

I tried the old DELETE... INDEX, but that got me nowhere. But a quick check of the syntax for DELETE gave me the hint.

 

LOOP AT me->questionnaire INTO entry USING KEY by_cat_guid WHERE cat_guid EQ i_guid.
  DELETE TABLE me->questionnaire FROM entry USING KEY by_cat_guid.
  ADD 1 TO index.
  entry-cat_seq = index.
  INSERT entry INTO TABLE renumbered.
ENDLOOP.
INSERT LINES OF renumbered USING KEY by_cat_guid INTO TABLE me->questionnaire.

 

What to be aware of

With an internal table with additional keys, there are a few things you really need to take care of.

 

1. You can't change a field of an entry you've ASSIGNED to, if that field is part of one of the keys

2. If you access data using one key - you really need to change it using the same key.

3. All of the usual internal table handling statements have the addition USING KEY. Sometimes it's vital - like with the DELETE example. Other times it's a matter of performance. For the INSERT LINES I could have omitted the USING KEY, and it would still work - however it is not as efficient, since I know that all my renumbered entries have the same cat_guid.

 

Final words

When new ABAP commands become available, try to use them. In my application, it probably won't make any difference. But what you don't use, you forget. Surely there will come a time when you do need additional accesses to internal tables - if you've already practiced, the next time it won't take as long.

Tooltip function in New ABAP Editor


Hello SCN,

I just found an interesting function in the New ABAP Editor. It's not actually hidden, but I think not many people use it.

UPDATE: from the comments it seems it is available in SAP from "EHP6 731" (the screenshots are from that release).

 

You can find it in the context menu (right click) on code:

tooltip-1.JPG

It shows different output in different places. For example, here is what it shows when used on a FM call:

tooltip-2.JPG

 

It also works on method calls, variables (showing their type) and other code...

 

What might be very helpful is that you can copy text from the tooltip and use it (e.g. to declare variables of the needed type for a FM/method call).

 

Example:

Instance method: GET_DDIC_FIELD
  Returns type description for Dictionary type
IMPORTING
     Value(P_LANGU) TYPE SYLANGU Optional SY-LANGU
        Current Language
RETURNING
     Value(P_FLDDESCR) TYPE DFIES Optional
        Field Description
EXCEPTIONS
      NOT_FOUND
      NO_DDIC_TYPE

Imagine having this tooltip in a switchable side panel (similar to the "Repository Browser" in SE80), interactively following the cursor position.

No more double-clicking on methods, functions, variables etc. to see their type and description...

Wouldn't that be amazing?

Dynamic access to internal table (or range)


Hello SCN,

 

So the other day I had the following requirement (I work on a SAP CRM 7.0 system): I wrote a new program in which I needed some data processing which was already coded in the subroutine of another – already existing – program. Since it concerned a pretty large piece of code, I decided not to simply copy-paste the logic but to call the subroutine from within my program like this:

 

PERFORM subroutine IN PROGRAM main_program CHANGING t_result.

 

Since the program in which I was calling the subroutine has a selection screen, and some of these parameters are used in the subroutine, I had to add an importing (USING) parameter to the subroutine which contained the values for these parameters. These values are partially supplied by the user in the selection screen of my program, and others are calculated in my program flow. So the above statement was corrected as follows:

 

PERFORM subroutine IN PROGRAM main_program
  USING    t_selscr_parameters
  CHANGING t_result.

 

Now comes the tricky part. The table T_SELSCR_PARAMETERS has the structure RSPARAMS (basically the standard type for any selection screen, with components SELNAME, KIND, SIGN, OPTION, LOW and HIGH). It contains records with the exact names (SELNAME) of the corresponding selection screen parameters and, of course, the values to be transferred to them (e.g. SIGN = 'I', OPTION = 'EQ', LOW = 'xxx').

 

So I added some logic to the subroutine which we are calling: a loop over SELSCR_PARAMETERS to transfer the value of each table line into the corresponding parameter from our main program’s selection screen.

For a regular parameter, I knew I could work with a field symbol of type 'any' and simply assign the name of the parameter (LS_RSPARAM-SELNAME) to this field symbol – let's call it <FS_ANY>. If the assignment works (which it should, because I named the parameter records in the SELSCR_PARAMETERS table exactly the same as the parameters in the selection screen), you can transfer the value into the selection screen parameter using the following statement:

<FS_ANY> = LS_RSPARAM-LOW.

 

But next to the 'regular' parameters, there were also some ranges (SELECT-OPTIONS) which needed to be transferred into the selection screen. Ranges are in fact separate internal tables with a header line:

scr-1.jpg

So you could use the same statement as for a regular parameter

ASSIGN ls_rsparam-selname TO <fs_any>.

But it would not be of much use, since you need to append a range line (with components SIGN, OPTION, LOW and HIGH) to your range (assigned to <FS_ANY>), and you can't do that – because <FS_ANY> is not an internal table.

 

So, you might think, I simply create a new field-symbol <fs_anytab> TYPE ANY TABLE . That way I can assign ls_rsparam-selname to <fs_anytab>, and append to that field-symbol.

 

True, syntactically this logic would not cause any problems, and your program would activate without errors. But once you step over the statement, you will get the following shortdump:

scr-2.jpg

So below you can find how I solved this issue. I searched for answers in the forum discussions here on SCN but couldn't find it immediately. Perhaps it is out there somewhere (especially since this concept is widely used in R/3, though not so much in CRM), but I blogged about it nonetheless, hoping to save a fellow colleague some valuable time.

 

DATA: ref(50)     TYPE c,
      dref        TYPE REF TO data,
      ls_rsparam  TYPE rsparams.

FIELD-SYMBOLS: <fs_any>    TYPE any,
               <fs_any_1>  TYPE any,
               <fs_anytab> TYPE ANY TABLE.

LOOP AT i_selscr_parameters INTO ls_rsparam.
  CASE ls_rsparam-kind.
    WHEN 'P'.
*     This is a regular parameter
      ASSIGN (ls_rsparam-selname) TO <fs_any>.
      IF <fs_any> IS ASSIGNED.
        <fs_any> = ls_rsparam-low.
        UNASSIGN <fs_any>.
      ENDIF.

    WHEN 'S'.
*     This is a range. Ranges are in fact tables with header line,
*     and a row structure SIGN OPTION LOW HIGH.
      CONCATENATE '(' sy-repid ')' ls_rsparam-selname '[]' INTO ref.
      CONDENSE ref NO-GAPS.

      ASSIGN (ref) TO <fs_anytab>.
*     So now we have the table (MAINPROGRAM)S_RANGE[] assigned to a
*     field-symbol of type ANY TABLE without dumping ;-)
      IF <fs_anytab> IS ASSIGNED.
*       We still need a structure which has the same line type as <fs_anytab>
        CREATE DATA dref LIKE LINE OF <fs_anytab>.

*       And now <fs_any> has our line type, we can start transferring the
*       values to the different components of the structure
        ASSIGN dref->* TO <fs_any>.
        IF <fs_any> IS ASSIGNED.
          ASSIGN COMPONENT 'SIGN' OF STRUCTURE <fs_any> TO <fs_any_1>.
          IF <fs_any_1> IS ASSIGNED.
            <fs_any_1> = ls_rsparam-sign.
            UNASSIGN <fs_any_1>.
          ENDIF.
          ASSIGN COMPONENT 'OPTION' OF STRUCTURE <fs_any> TO <fs_any_1>.
          IF <fs_any_1> IS ASSIGNED.
            <fs_any_1> = ls_rsparam-option.
            UNASSIGN <fs_any_1>.
          ENDIF.
          ASSIGN COMPONENT 'LOW' OF STRUCTURE <fs_any> TO <fs_any_1>.
          IF <fs_any_1> IS ASSIGNED.
            <fs_any_1> = ls_rsparam-low.
            UNASSIGN <fs_any_1>.
          ENDIF.
          ASSIGN COMPONENT 'HIGH' OF STRUCTURE <fs_any> TO <fs_any_1>.
          IF <fs_any_1> IS ASSIGNED.
            <fs_any_1> = ls_rsparam-high.
            UNASSIGN <fs_any_1>.
          ENDIF.
*         finally append the filled line to the range table (the append step described in the text)
          INSERT <fs_any> INTO TABLE <fs_anytab>.
        ENDIF.
      ENDIF.
  ENDCASE.
ENDLOOP.

 

NOTE: The point of this blog is to elaborate on accessing internal table variables dynamically across programs; I certainly do not claim this is the best or most performant solution to my original requirement. Any comments on this blog are highly appreciated!

 

Cheers,

Tom.

Seeking the most efficient way to detect whether all table rows have the same key


The requirement is: there is an internal table with a large number of rows.

 

If all rows have the identical recipient_id, that id (30273) must be returned.

 

UUID                  Phone_number   Recipient_id
0412ASFDSFDSFXCVS     138XXXXX1      30273
0412ASFDSFDSFXCVD     138XXXXX2      30273
0412ASFDSFDSFXCVF     138XXXXX3      30273
…                     …              30273

 

If not, it must return empty.

UUID                  Phone_number   Recipient_id
0412ASFDSFDSFXCVS     138XXXXX1      30273
0412ASFDSFDSFXCVD     138XXXXX2      30273
0412ASFDSFDSFXCVF     138XXXXX3      30273
…                     …              30272

 

The table line type structure in the project looks like below:

clipboard1.png

Three different approaches were tried.

 

Approach 1

The idea: a temporary table lt_sms_status holds a copy of the internal table to be checked; we then SORT the temporary table and delete adjacent duplicates. If all table rows have the same recipient id, exactly one entry is left after the operation.

DATA: lt_sms_status LIKE it_tab.

lt_sms_status = it_tab.
SORT lt_sms_status BY recipient_id.
DELETE ADJACENT DUPLICATES FROM lt_sms_status COMPARING recipient_id.
IF lines( lt_sms_status ) = 1.
  READ TABLE it_tab ASSIGNING FIELD-SYMBOL(<line>) INDEX 1.
  ev_rec_id = <line>-recipient_id.
ENDIF.

The drawback of approach 1 is that it can lead to unnecessarily high memory consumption. When lt_sms_status = it_tab is executed, no new memory allocation occurs until the first write operation on the copied content. This behaviour is documented as "Delayed Copy".
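A minimal sketch of that behaviour, using the variable names from the text:

DATA lt_sms_status LIKE it_tab.

lt_sms_status = it_tab.              "no deep copy yet - both variables still share the same rows
SORT lt_sms_status BY recipient_id.  "first write access: only now does the kernel copy all rows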

We also have concerns regarding the performance of the SORT and DELETE statements when they are executed on a big internal table.

clipboard2.png

Approach 2

Now we fetch the recipient id of the first row and compare it with the remaining rows in the table. If most of the table rows have different recipient ids, the execution has a chance to quit early. However, if all the table rows have exactly the same recipient id, this approach has to loop until the last table row.

  

DATA: lv_diff_found TYPE abap_bool VALUE abap_false.

READ TABLE it_tab ASSIGNING FIELD-SYMBOL(<line>) INDEX 1.
DATA(lv_account_id) = <line>-recipient_id.
LOOP AT it_tab ASSIGNING FIELD-SYMBOL(<ls_line>).
  IF lv_account_id <> <ls_line>-recipient_id.
    lv_diff_found = abap_true.
    EXIT.
  ENDIF.
ENDLOOP.
IF lv_diff_found = abap_false.
  ev_rec_id = lv_account_id.
ENDIF.

Approach 3

The idea is similar to approach 2, but instead of a manual comparison inside each loop pass we leverage "LOOP AT ... WHERE condition".

  

READ TABLE it_tab ASSIGNING FIELD-SYMBOL(<line>) INDEX 1.
LOOP AT it_tab ASSIGNING FIELD-SYMBOL(<ls_line>) WHERE recipient_id <> <line>-recipient_id.
ENDLOOP.
IF sy-subrc <> 0.
  ev_rec_id = <line>-recipient_id.
ENDIF.

In order to measure the performance, we construct two kinds of test case. In the first, we generate an internal table with N rows that all have exactly the same recipient id; in the second, every row has a different one. Both are extreme scenarios. We could also measure cases in between, for example an N-row table where 50% of the rows share the same id and the other 50% differ.
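A minimal sketch of how such a test case and its measurement can be set up (the line type and the row count are assumptions for illustration, not the project's real structure):

TYPES: BEGIN OF ty_sms,
         uuid         TYPE c LENGTH 32,
         phone_number TYPE c LENGTH 16,
         recipient_id TYPE n LENGTH 10,
       END OF ty_sms.

DATA: lt_tab TYPE STANDARD TABLE OF ty_sms,
      ls_row TYPE ty_sms,
      lv_t0  TYPE i,
      lv_t1  TYPE i.

* test case 1: N rows that all carry the same recipient id
DO 1000 TIMES.
  ls_row-recipient_id = '30273'.
  APPEND ls_row TO lt_tab.
ENDDO.

GET RUN TIME FIELD lv_t0.
* ... run approach 1, 2 or 3 against lt_tab here ...
GET RUN TIME FIELD lv_t1.
WRITE: / 'runtime in microseconds:', lv_t1 - lv_t0.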

 

Performance test result

The time spent is measured in microseconds.

N = 1000

For the first test case, approach 3 is the most efficient. For the second test case, approach 2 is the winner, as we expected.

clipboard4.png


N = 10000

clipboard5.png

N = 100000

clipboard6.png

N = 1000000

clipboard7.png

N = 5000000

clipboard8.png

Based on the performance results, we no longer consider approach 1. For the choice between approaches 2 and 3, we need to investigate the distribution of recipient ids in the real world.

 

Maybe you can also share if you have a better solution?

Project Objectify - continued


Hi SCN community!

 

If you're not familiar with Matthew Billingham's Project Objectify, please read it before you continue.

 

The idea is simple... let's build a set of highly reusable, flexible and helpful abap classes that we share and collaborate on.

 

Who hasn't had the feeling to be writing the same code over and over again? To get some document flow, pricing conditions, etc. Wouldn't it make more sense to have a set of powerful abap classes, properly designed and coded, that you can easily export/import for reuse?

 

The idea was coined in 2009 by Matthew, and I was surprised to see no one had actually picked it up, so I've created a github repository for this, and I've started by sharing a few very simple classes, that I hope will set the template for future development.

 

Here is the link for it: https://github.com/EsperancaB/sap_project_object

 

Hope to see you there.

 

All the best,

Bruno

Calling BAPI_GOODSMVT_CREATE several times in a user program


Here I will not write about the details of using BAPI_GOODSMVT_CREATE; that has already been written about many times, including on SCN.

I propose to focus on one small detail without which multiple calls to BAPI_GOODSMVT_CREATE will not work correctly.


Why would BAPI_GOODSMVT_CREATE be called repeatedly in a Z program? For example, you specify the parameters for a material movement, but the BAPI returns an error. You change something and press the button again, calling the BAPI once more.

 


So, if the call again simply looks like CALL FUNCTION 'BAPI_GOODSMVT_CREATE', you'll get an error despite the now correct parameters.

But if you specify the addition DESTINATION 'NONE' – CALL FUNCTION 'BAPI_GOODSMVT_CREATE' DESTINATION 'NONE' – the document will be created!

Thus, using DESTINATION 'NONE', you can be sure that the data buffers of previous calls have no impact!
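For illustration, a minimal sketch of such a call (the structures are only named, not filled; the essential part is the DESTINATION 'NONE' addition):

DATA: ls_header  TYPE bapi2017_gm_head_01,
      ls_code    TYPE bapi2017_gm_code,
      lv_mat_doc TYPE bapi2017_gm_head_ret-mat_doc,
      lt_items   TYPE TABLE OF bapi2017_gm_item_create,
      lt_return  TYPE TABLE OF bapiret2.

* fill ls_header, ls_code and lt_items as usual for the goods movement ...

CALL FUNCTION 'BAPI_GOODSMVT_CREATE' DESTINATION 'NONE'
  EXPORTING
    goodsmvt_header  = ls_header
    goodsmvt_code    = ls_code
  IMPORTING
    materialdocument = lv_mat_doc
  TABLES
    goodsmvt_item    = lt_items
    return           = lt_return.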

 

P.S. It is also necessary to specify DESTINATION 'NONE' when calling COMMIT or ROLLBACK, like below:

IF p_matdoc IS INITIAL.
  CALL FUNCTION 'BAPI_TRANSACTION_ROLLBACK' DESTINATION 'NONE'.
ELSE.
  CALL FUNCTION 'BAPI_TRANSACTION_COMMIT' DESTINATION 'NONE'.
ENDIF.

CALL FUNCTION 'RFC_CONNECTION_CLOSE'
  EXPORTING
    destination = 'NONE'.

Addressing down


Triggered by this forum post Issue of blank lines removal in address in master page in Adobe form, I’ve decided to tackle a topic that has been eating at me for some years.

 

Why do developers seem so reluctant to use address nodes in forms?

 

This is not a criticism of Anindita, the author of the post who has inherited a form and understandably wants to minimise the amount of change.  It’s more a result of spending years having to convince developers that address nodes and not individual text fields are the best way to deliver this functionality.

 

"I was only doing what I was taught"

 

My own theory as to why this is not adopted puts the blame squarely with the SAP training material.  We’re all familiar with the flight model that is used in the ABAP training (I found my original ABAP training certificate from 1991 recently, and as I recall that course used the same model).  But the problem is that this model pre-dates the introduction of Central Address Management (or Business Address Services as it seems to be called now).  So while it’s fine for the programming courses, the form development courses tend not to give CAM or BAS the focus it deserves.  While the courses for SAPScript, Smartforms and adobe forms all cover the topic of the address node, none of them include the topic in the exercises.

 

When I taught the SAPscript and Smartform courses myself I always checked table ADRC in the training system and found some valid address numbers to both demonstrate their use and include the topic in the exercises, but any trainer focusing solely on the material will inevitably skim over this topic.

 

"I was only doing what I was told"

 

My other theory is that developers are following Functional Specs too closely. A form FS will often include a mock-up something like this:

form_2.png

Then rather than challenging the specific example or just using an address node because it’s best practice, the developer will slavishly follow what has been specified.  And in the relatively clean data of a project test system all will be well, only when the vagaries of production data are introduced do blank lines appear in the address, and by then there’s a reluctance to make fundamental changes to forms.


The advantages of address nodes are many: compression of blank lines, prioritisation of lines when space is limited, international formatting, and fewer fields passed from the print program or initialisation. I could cover these in detail, but they're all covered in the SAP help and there's not a great deal I could add to that.
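For reference, a minimal SAPscript sketch of such a node (the &VBDKA-ADRNR& field is just an illustrative source of the CAM address number; Smartforms and Adobe forms offer the equivalent address node in their designers):

/: ADDRESS PARAGRAPH AS
/:   ADDRESSNUMBER &VBDKA-ADRNR&
/: ENDADDRESS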


Now, like any technique I’m sure there are disadvantages to address nodes and please use the comments section to point out their shortcomings.  Otherwise, go out there and champion the often forgotten address node.


A small tip for viewing a RAWSTRING field in SE16


Sometimes you would like to view the content of a field of type RAWSTRING in a table:

clipboard1.png

The raw string holds the configuration in XML format; however, this format cannot be viewed properly in SE16 directly.

clipboard2.png

In fact, the dynpro in the screenshot above is implemented by a program which is automatically generated by the framework. You can find its name via System -> Status:

clipboard3.png

clipboard4.png

clipboard5.png

Execute report RS_ABAP_SOURCE_SCAN with search string "select * from BSPC_DL_PERSSTOR" and search program /1BCDWB/DBBSPC_DL_PERSSTOR.

clipboard6.png

Set breakpoints on the three search results:

clipboard7.png

Relaunch SE16 and access the table; one of the breakpoints is triggered:

clipboard8.png

Switch to the XML Browser:

clipboard9.png

Then you can see the XML details in the debugger. With this tip it is not necessary to write any report to select the XML data out of the database table.

clipboard10.png


Shoot Me Up ABAP


Dependency Injection

 

image001.png

 

There is many a true word, Spoken Inject

 

One line summary:-

 

One way to write OO programs with many small classes in fewer lines of code.

 

Back Story

 

The other day there was a blog on SCN about Dependency injection.

 

http://scn.sap.com/community/abap/blog/2014/01/06/successful-abap-dependency-injection

 

I thought – I know what that is – if an object (say a car object) needs an engine object to work, you don't have the car object create the engine object, you pass the engine object in through the car object's constructor.
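In ABAP terms, a minimal sketch of that idea with invented local classes looks like this:

CLASS lcl_engine DEFINITION.
  PUBLIC SECTION.
    METHODS start.
ENDCLASS.

CLASS lcl_engine IMPLEMENTATION.
  METHOD start.
    WRITE: / 'engine running'.
  ENDMETHOD.
ENDCLASS.

CLASS lcl_car DEFINITION.
  PUBLIC SECTION.
    "the car does not CREATE OBJECT its own engine - it gets one injected
    METHODS: constructor IMPORTING io_engine TYPE REF TO lcl_engine,
             drive.
  PRIVATE SECTION.
    DATA mo_engine TYPE REF TO lcl_engine.
ENDCLASS.

CLASS lcl_car IMPLEMENTATION.
  METHOD constructor.
    mo_engine = io_engine.
  ENDMETHOD.
  METHOD drive.
    mo_engine->start( ).
  ENDMETHOD.
ENDCLASS.

* the caller decides which engine (or engine subclass) the car gets
DATA: lo_engine TYPE REF TO lcl_engine,
      lo_car    TYPE REF TO lcl_car.

CREATE OBJECT lo_engine.
CREATE OBJECT lo_car EXPORTING io_engine = lo_engine.
lo_car->drive( ).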

 

I had thought that was solely concerned with unit testing, but if you look at the comments at the bottom, when I started talking about this on the blog comments, people soon put me right, it turns out it has a much wider scope.

 

As soon as I realised I was barking up the wrong tree, I read all I could on the subject, for example …

 

http://en.wikipedia.org/wiki/Dependency_injection

 

http://martinfowler.com/articles/injection.html

 

http://www.jamesshore.com/Blog/Dependency-Injection-Demystified.html

 

… ending with this blog by Jack Stewart

 

http://scn.sap.com/community/abap/blog/2013/08/28/dependency-injection-for-abap

 

I always thought the idea was great – often you have to create a bunch of objects and then “wire them together” by passing them into each other’s constructors so they know about each other.

 

This gives you the flexibility to pass in subclasses to alter the behaviour of the application – as I said I had first heard about this in the context of unit testing, but when I thought about it again naturally you can pass in any sort of subclass to change the way the program runs e.g. different subclass based on whatever criteria makes sense, just like the BADI filter mechanism.

 

That is a wonderful thing to be able to do, and subclassing is one of the few benefits of OO programming that one of my colleagues can get his head around, but it does tend to involve a lot of “boiler plate” programming i.e. lots of CREATE OBJECT statements, passing in assorted parameters.

 

Many Small Classes, make Light Work

 

http://scn.sap.com/community/abap/blog/2013/08/22/the-issue-with-having-many-small-classes

 

The idea is that the smaller and more focused your classes are, the easier they are to re-use and maintain. An OO principle is that a class should only have one reason to change i.e. it should do one thing only. If you follow that principle you get loads of benefits, but you have to create loads of classes in your program.

 

When I first started playing around with OO programming I was too lazy to keep writing CREATE OBJECT so I made everything static. That is not actually a sensible thing to do just to avoid work, as then you can’t subclass things. SAP itself found that out when they initially made ABAP proxy classes static.

 

The NEW Objects on the Block

 

In the Java programming language you create objects by saying GOAT = NEW GOAT as opposed to CREATE OBJECT GOAT.

 

In the “Head First Design Patterns Book” it gives a bunch of about five rules of programming which every Java programmer should aspire to but are in fact impossible to follow in real life.

 

One of those revolved around the rule being never to use the NEW statement because that hard coded the exact type of class you were creating, but how can you create objects if the only way to create them is to use the NEW statement?

 

In both Java and ABAP interfaces come into play here, you declare the ANIMAL object as an interface of type FARM ANIMAL (which GOAT implements) and say CREATE OBJECT ANIMAL TYPE GOAT. Perhaps a better example is in ABAP2XLS when you declare the object that writes out the file as an interface and then create it using the TYPE of the EXCEL version you want e.g. 2007.
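A minimal sketch of that interface trick, with invented local names rather than the real ABAP2XLS classes:

INTERFACE lif_farm_animal.
  METHODS make_noise.
ENDINTERFACE.

CLASS lcl_goat DEFINITION.
  PUBLIC SECTION.
    INTERFACES lif_farm_animal.
ENDCLASS.

CLASS lcl_goat IMPLEMENTATION.
  METHOD lif_farm_animal~make_noise.
    WRITE: / 'Meh!'.
  ENDMETHOD.
ENDCLASS.

* the reference is typed on the interface; only this one line names the concrete class
DATA lo_animal TYPE REF TO lif_farm_animal.
CREATE OBJECT lo_animal TYPE lcl_goat.
lo_animal->make_noise( ).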

 

Now you are always going to have to say the specific type (subclass) you want somewhere, but is it possible to decouple this from the exact instant you call the CREATE OBJECT statement?

 

Since you can have a dynamic CREATE OBJECT statement, you would think so, but how does this apparent diversion link back to what I was talking about earlier?

 

Jack Black and his blog Silver

 

Going back to Dependency Injection the blog by Jack Stewart contained a link to download some sample code. I downloaded it, had a look, thought it was great, and then totally re-wrote it. That is no reflection on the quality of the original; I am just physically incapable of not rewriting every single thing I come across.

 

I am going to include a SAPLINK file in text format at the end of this blog, but first I shall go through the code, top down. Firstly, this test program shows exactly what I am trying to achieve i.e. the same thing in less lines of code.

 

I have created some dummy Y classes which just have constructors to pass in a mixture of object instances and elementary data object parameters, my dear Watson. They only have one method each, just to write out if they are a base class or a subclass. The important thing is the effort involved to create them.

 

The Da Vinci Code Samples

 

First of all, a basic structure to get some elementary parameters and say if we want to use a test double or not. I am sticking with the unit test concept for now, but as I mentioned, you can pass in any old subclass you want, according to the good old, ever-popular Liskov Substitution principle.

 

*&---------------------------------------------------------------------*
*& Report  Y_INJECTION_TEST
*&
*&---------------------------------------------------------------------*
* Show two ways to create linked objects, one using dependency injection
*--------------------------------------------------------------------*
REPORT  y_injection_test.

PARAMETERS : p_valid TYPE sy-datum,
             p_werks TYPE werks_d,
             p_test  AS CHECKBOX.

INITIALIZATION.
  p_valid = sy-datum.
  p_werks = '3116'.

START-OF-SELECTION.
  PERFORM do_it_the_long_way.
  PERFORM do_it_the_short_way.

 

It’s a Long Long Way, from there to here

 

Firstly, the traditional way….

 

*&---------------------------------------------------------------------*
*&      Form  DO_IT_THE_LONG_WAY
*&---------------------------------------------------------------------*
* Normal way of doing things
*----------------------------------------------------------------------*
FORM do_it_the_long_way .
  DATA: lo_logger        TYPE REF TO ycl_test_logger.
  DATA: lo_db_layer      TYPE REF TO ycl_test_db_layer.
  DATA: lo_mock_db_layer TYPE REF TO ycl_test_mock_db_layer.
  DATA: lo_simulator     TYPE REF TO ycl_test_simulator.

  CREATE OBJECT lo_logger.

  IF p_test = abap_true.

    CREATE OBJECT lo_mock_db_layer
      EXPORTING
        io_logger   = lo_logger
        id_valid_on = p_valid.

    CREATE OBJECT lo_simulator
      EXPORTING
        id_plant_id = p_werks
        io_db_layer = lo_mock_db_layer
        io_logger   = lo_logger.

  ELSE.

    CREATE OBJECT lo_db_layer
      EXPORTING
        io_logger   = lo_logger
        id_valid_on = p_valid.

    CREATE OBJECT lo_simulator
      EXPORTING
        id_plant_id = p_werks
        io_db_layer = lo_db_layer
        io_logger   = lo_logger.

  ENDIF.

  lo_simulator->say_who_you_are( ).

  SKIP.

ENDFORM.                    " DO_IT_THE_LONG_WAY

 

Get Shorty

 

Now we do the same thing, using a Z class I created to use dependency injection.

 

*&---------------------------------------------------------------------*
*&      Form  DO_IT_THE_SHORT_WAY
*&---------------------------------------------------------------------*
*  Using Constructor Injection
*----------------------------------------------------------------------*
FORM do_it_the_short_way .
* Local Variables
  DATA: lo_simulator TYPE REF TO ycl_test_simulator.

  zcl_bc_injector=>during_construction( :
    for_parameter = 'ID_PLANT_ID' use_value = p_werks ),
    for_parameter = 'ID_VALID_ON' use_value = p_valid ).

  IF p_test = abap_true.
    "We want to use a test double for the database object
    zcl_bc_injector=>instead_of( using_main_class = 'YCL_TEST_DB_LAYER'
                                 use_sub_class    = 'YCL_TEST_MOCK_DB_LAYER' ).
  ENDIF.

  zcl_bc_injector=>create_via_injection( CHANGING co_object = lo_simulator ).

  lo_simulator->say_who_you_are( ).

ENDFORM.                    " DO_IT_THE_SHORT_WAY

 

I think the advantage is self-evident – the second way is much shorter, and it’s got Big Feet.

 

If the importing parameter of the object constructor was an interface it would not matter at all. You just pass the interface name in to the INSTEAD_OF method as opposed to the main class name.

 

I have done virtually no error handling in the below code, except throwing fatal exceptions when unexpected things occur. This could be a lot more elegant, I am just demonstrating the basic principle.

 

Firstly the DURING CONSTRUCTION method analyses elementary parameters and then does nothing more fancy than adding entries to an internal table.

 

* Local Variables
  DATA: lo_description       TYPE REF TO cl_abap_typedescr,
        ld_dummy             TYPE string ##needed,
        ld_data_element_name TYPE string,
        ls_parameter_values  LIKE LINE OF mt_parameter_values.

  ls_parameter_values-identifier = for_parameter.

  CREATE DATA ls_parameter_values-do_value LIKE use_value.
  GET REFERENCE OF use_value INTO ls_parameter_values-do_value.

  CHECK sy-subrc = 0.

  CALL METHOD cl_abap_structdescr=>describe_by_data_ref
    EXPORTING
      p_data_ref           = ls_parameter_values-do_value
    RECEIVING
      p_descr_ref          = lo_description
    EXCEPTIONS
      reference_is_initial = 1
      OTHERS               = 2.

  IF sy-subrc <> 0.
    RETURN.
  ENDIF.

  SPLIT lo_description->absolute_name AT '=' INTO ld_dummy ld_data_element_name.

  ls_parameter_values-rollname = ld_data_element_name.

  INSERT ls_parameter_values INTO TABLE mt_parameter_values.

 

It’s the same deal with the INSTEAD_OF method for saying what exact subclass you want to create, except it’s even simpler this time.

 

METHOD instead_of.
* Local Variables
  DATA: ls_sub_classes_to_use LIKE LINE OF mt_sub_classes_to_use.

  ls_sub_classes_to_use-main_class = using_main_class.
  ls_sub_classes_to_use-sub_class  = use_sub_class.

  "Add entry at the start, so it takes priority over previous
  "similar entries
  INSERT ls_sub_classes_to_use INTO mt_sub_classes_to_use INDEX 1.

ENDMETHOD.

 

Now we come to the main CREATE_VIA_INJECTION method. I like to think I have written this as close to plain English as I can, so that this is more or less self-explanatory.

 

METHOD create_via_injection.
* Local Variables
  DATA: lo_class_in_reference_details  TYPE REF TO cl_abap_refdescr,
        lo_class_in_type_details       TYPE REF TO cl_abap_typedescr,
        lo_class_to_create_type_detail TYPE REF TO cl_abap_typedescr,
        ld_class_passed_in             TYPE seoclass-clsname,
        ld_class_type_to_create        TYPE seoclass-clsname,
        ls_created_objects             LIKE LINE OF mt_created_objects,
        lt_signature_values            TYPE abap_parmbind_tab.

* Determine the class type of the reference object that was passed in
  lo_class_in_reference_details ?= cl_abap_refdescr=>describe_by_data( co_object ).
  lo_class_in_type_details       = lo_class_in_reference_details->get_referenced_type( ).
  ld_class_passed_in             = lo_class_in_type_details->get_relative_name( ).

  "See if we need to create the real class, or a subclass
  determine_class_to_create(
    EXPORTING
      id_class_passed_in             = ld_class_passed_in
      io_class_in_type_details       = lo_class_in_type_details
    IMPORTING
      ed_class_type_to_create        = ld_class_type_to_create
      eo_class_to_create_type_detail = lo_class_to_create_type_detail ).

  READ TABLE mt_created_objects INTO ls_created_objects
       WITH TABLE KEY clsname = ld_class_type_to_create.

  IF sy-subrc = 0.
    "We already have an instance of this class we can use
    co_object ?= ls_created_objects-object.
    RETURN.
  ENDIF.

  "See if the object we want to create has parameters, and if so, fill them up
  fill_constructor_parameters( EXPORTING io_class_to_create_type_detail = lo_class_to_create_type_detail
                               IMPORTING et_signature_values            = lt_signature_values ).

  create_parameter_object( EXPORTING id_class_type_to_create = ld_class_type_to_create
                                     it_signature_values     = lt_signature_values       " Parameter Values
                           CHANGING  co_object               = co_object ).              " Created Object

ENDMETHOD.

 

There is not a lot of point in drilling into this any further – I would encourage you to download the SAPLINK file, and then run this in debug mode to see what is happening.

 

In summary, I am always on the lookout for ways to reduce the so-called "boiler plate" code, so the remaining code can concentrate on what the application is supposed to be doing as opposed to how it is doing it. This dependency injection business seems ideally suited to this purpose.

 

Now, while I am here.

 

image002.png

 

Did I mention I am giving a speech at the “Mastering SAP Technology 2014” conference at Melbourne on the 31/03/2014 – it’s about unit testing of ABAP programs.

 

What’s that? I’ve already mentioned this? Many times?

 

Oh dear, that must have slipped my mind. In that case I won’t go on about it, and I’ll sign off.

 

Cheersy Cheers

 

Paul

 

#SAPTechEd 2013 Interview of the Week: ABAP Code Pushdown through SAP HANA


In 2013's SAP TechEd Las Vegas I had the opportunity and pleasure of chatting with Sudipto Shankar Dasgupta and Pradeep S from the Custom Development and Strategic Projects team about their work on pushdown of ABAP programs to HANA.


The discussion went around the following broad topics

·       Relevance of code push down and its benefits

·       Reasons for choosing code push down as an option for optimization

·       Understanding the topic from a developer’s perspective

·       Customer stories





In 2014, the SAP TechEd name will be retired and the conference will evolve into an exciting new program called SAP d-code, which will address education, collaboration, and networking for the entire SAP ecosystem of developers and technology professionals, incorporating the best elements of SAP TechEd. Hope to see you there. Learn More

Number Ranges – Internal or External ranges Best Practice Scenario


Speaking about number ranges, here is a small write-up presenting a best-practice scenario for them.

 

Old Numbers Vs New Numbers

Moving from a legacy system to a new environment presents an opportunity to clean up the data. It is very common that there are duplicate entries in the legacy system and some master data that is outdated.

With the new system, we have an opportunity to get rid of the data that is of no use to the business. This also reduces data maintenance costs.

 

If we go for old numbers (i.e. using the legacy numbers in the new system as well) and there were duplicates, it's possible that there would be gaps in the numbers.

 

Because of the above reasons, businesses go for new number ranges. Unless there is a specific 'business reason', it is strongly advised to go for new numbers.

Who decides Number Ranges (Internal or External)?

 

Master Data: There is some master data for which businesses normally go for external numbers – for example, finished products – where the number of records is very small (a few thousand).

Businesses may want to 'construct' their numbers based on particular criteria – for example, finished products start with '1', followed by plant '0002', then product group '01', and then a 5-digit number 10001 – which gives us the number 100020110001. Users can then see very easily what a product is, where it is produced, and so on.

 

Note: even here, every number has to be 'constructed' meticulously, and that takes a lot of effort.

               
There is some master data for which businesses normally go for internal numbers – for example, customers or raw materials – where the number of records is very high (a few hundred thousand).

So, sold-to customers could run from 1000000000 to 1999999999. It does not make sense to have external number ranges here, because that would break the backs of the business team members!

 

Transaction Data: In general, transaction data is always a candidate for internal numbers. I have never seen any engagement going for external numbers here.


Points:

  1. There will be high resistance from business users if existing numbers are going to be changed –
     they have already memorized everything and they (as do we) hate to lose the association with old pals.
     We should present the following things to them:

  • How duplicates cause the gaps in the numbers
  • How the SAP system STILL allows them to use the old numbers to search for the new numbers
  • Involve them in 'constructing' the number ranges

  2. Let the system 'manage' the numbers – it's better to let the system do the work for us rather than involving hundreds of users.

 

While doing data migration, we follow a different method. We ask the functional people to make every number range 'external' and then let the ETL tool generate the numbers and load the data.

Once the data is loaded, all the number ranges are switched back to their original status.

 

 

 

 

 

 













Raw data serialization to be used in RFC


Hi there.

As data volumes constantly grow, handling such amounts of data requires more and more time. The most logical way of solving this issue is to process the data in parallel. Currently, ABAP offers only one way to do parallel processing: RFC-enabled function modules. This approach is quite old and widely known, but RFC FMs have some limitations. One of them: they don't allow you to pass references. Usually this is not a problem, but in my case the tool works with multiple data structures that are usually stored as REF TO DATA.

It's impossible to pass such data directly to an RFC FM, so here is the trick:

  1. Assign the REF TO DATA variable to a field symbol
  2. Export this field symbol to a data buffer as an XSTRING
  3. Pass the xstring to the RFC FM
  4. Perform the backward transformation

 

There are two small tricks with import/export. The first is that you have to explicitly name the objects that you export and use the same names during import, even if the data buffer contains data for exactly one object. The other thing is compression: in this small test the data buffer is 190 bytes long without compression and just 95 bytes with it.

The documentation says that this export routine can fail with an out-of-memory error, but the current limits are not described.

Here is a simple example of how it works:

REPORT z_binary_transform.

DATA: lr_data   TYPE REF TO data,
      lt_test   TYPE TABLE OF string,
      wa_string TYPE string,
      x_buffer  TYPE xstring.

FIELD-SYMBOLS: <fs1> TYPE any,
               <fs2> TYPE any.

INITIALIZATION.
* Fill the source table with test data
  DO 5 TIMES.
    wa_string = sy-index.
    CONDENSE wa_string.
    CONCATENATE 'test' wa_string INTO wa_string SEPARATED BY '_'.
    APPEND wa_string TO lt_test.
  ENDDO.

* Wrap the data in a data reference and dereference it into a field symbol
  CREATE DATA lr_data LIKE lt_test.
  ASSIGN lt_test TO <fs1>.
  ASSIGN lr_data->* TO <fs2>.
  <fs2> = <fs1>.

* Serialize the dereferenced data into a binary buffer
  EXPORT rep_tab = <fs2> TO DATA BUFFER x_buffer COMPRESSION ON.

  PERFORM abc USING x_buffer.

FORM abc USING in_buffer TYPE xstring.
  DATA: lr_data2 TYPE REF TO data,
        lt_test2 TYPE TABLE OF string.
  FIELD-SYMBOLS: <fs3> TYPE STANDARD TABLE,
                 <fs4> TYPE any.

  CREATE DATA lr_data2 TYPE TABLE OF string.
  ASSIGN lr_data2->* TO <fs3>.

* The import must use the same ID (rep_tab) that was used for the export
  IMPORT rep_tab = <fs3> FROM DATA BUFFER in_buffer.

* Output the supplied table
  LOOP AT <fs3> ASSIGNING <fs4>.
    WRITE: <fs4>.
    NEW-LINE.
  ENDLOOP.
ENDFORM.
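To complete the picture, here is a hedged sketch of how the resulting XSTRING might then be handed over to an RFC-enabled function module started as a new task; the function module Z_PROCESS_BUFFER, its parameter IV_BUFFER and the task name are assumptions, since the original post does not show this part.

* Hypothetical RFC-enabled FM that takes the serialized buffer as a plain XSTRING
CALL FUNCTION 'Z_PROCESS_BUFFER'
  STARTING NEW TASK 'TASK0001'
  EXPORTING
    iv_buffer = x_buffer.

* Inside Z_PROCESS_BUFFER the receiver would run
* IMPORT rep_tab = ... FROM DATA BUFFER iv_buffer, exactly as in FORM abc above.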

Simplification of import of serialized data for RFC usage


Hi there.

As shown in the previous example, it is possible to supply almost any data to an RFC-enabled function module. However, direct use of the IMPORT statement requires explicit specification of the IDs in the data buffer and also involves some manual work to define the types. Fortunately there is a special class, CL_ABAP_EXPIMP_UTILITIES, that can handle all the dirty work.

The only limitation I have found: it works well with DDIC types, but for custom-defined types it can fail without any detailed explanation.

This greatly simplifies the extension of the methods and makes the code much more compact and readable.

I have tested this example with 50,000 lines and ABAP seems to handle that amount of data without trouble. For parallel processing this should generally be enough.

 

So, here is the sample code:

REPORT Z_BINARY_TRANSFORM2.
data: x_b2    type xstring,
      lt_abc  type STANDARD TABLE OF t000,
      wa_t000 type t000.
INITIALIZATION.
* Fill table with sample data
wa_t000-mandt = sy-mandt.
wa_t000-mtext = 'SCN Demo #1'.
wa_t000-ort01 = 'Moscow'.
append wa_t000 to lt_abc.
wa_t000-mandt = sy-mandt.
wa_t000-mtext = 'SCN Demo #2'.
wa_t000-ort01 = 'Tokio'.
append wa_t000 to lt_abc.
* Export data into buffer
export tadir = lt_abc to DATA BUFFER x_b2 COMPRESSION on.
* Check the result
perform describe_buffer using x_b2.
form describe_buffer using in_buffer type xstring.
  data: lt_datatab type tab_cpar.
  FIELD-SYMBOLS: <fs>  like line of lt_datatab,
                 <fs2> type any table,
                 <fs3> type any.

  lt_datatab = cl_abap_expimp_utilities=>dbuf_import_create_data( dbuf = in_buffer ).

  loop at lt_datatab assigning <fs>.
    write: <fs>-name.   " Name of the currently processed object in the buffer
    new-line.
    assign <fs>-dref->* to <fs2>.
    loop at <fs2> assigning <fs3>.
      write: <fs3>.
      new-line.
    endloop.
  endloop.
endform.

Objects serialization for RFC forms


Hi there.

As shown previously, there are some limitations on the usage of RFC-enabled function modules. Most of them are easily avoidable, but the main one is that it is impossible to pass references into such modules. The IMPORT/EXPORT statements do not allow references either, so there is a problem.

One option to bypass this limitation is to serialize the objects into a string container. Luckily for us, SAP provides the transformation "id", which can handle almost everything. Such a transformation produces XML, so there is a noticeable overhead, but that can be handled as well, since SAP also provides kernel-based methods for string compression. In this test the original XML is 586 bytes long and the compressed version is 302 bytes. Pretty impressive, huh?

The only thing that should be highlighted is that the class being serialized must implement the interface IF_SERIALIZABLE_OBJECT. This interface does not declare any methods, so you only have to list it in the INTERFACES section. That's it!

 

One more note: this method also works well if the object contains nested objects; after deserialization the nested objects are recreated and their references are set up properly, so it would work for linked lists, for example. The main example below keeps things simple and does not show this, but a small additional sketch of the nested case is shown after it.

 

So, here is the code (there is no exception handling, for simplicity, but in productive software you should always handle these exceptions):

REPORT Z_BINARY_TRANSFORM3.
class z_ser_data definition.
  public section.
    interfaces: if_serializable_object.
    data: member type string read-only.
    methods:
      constructor
        importing
          in_val type string,
      get_val
        returning value(out_val) type string,
      change_val
        importing
          new_val type string.
endclass.

class z_ser_data implementation.
  method constructor.
    member = in_val.
  endmethod.
  method get_val.
    out_val = me->member.
  endmethod.
  method change_val.
    me->member = new_val.
  endmethod.
endclass.

data: lr_class   type ref to z_ser_data,
      lv_ser_xml type string,
      lv_x_gzip  type xstring.

field-symbols: <fs> type any.

INITIALIZATION.
  create object lr_class
    exporting
      in_val = 'String from calling program'.

* Serialize the object into XML
  call transformation id
    source model = lr_class
    result xml lv_ser_xml.

* Compress the XML with the kernel-based gzip class
  cl_abap_gzip=>compress_text(
    exporting
      text_in        = lv_ser_xml    " Input Text
*     text_in_len    = -1            " Input Length
      compress_level = 9             " Level of Compression
*     conversion     = 'DEFAULT'     " Conversion to UTF8 (UC)
    importing
      gzip_out       = lv_x_gzip     " Compressed output
*     gzip_out_len   =               " Output Length
  ).
*  catch cx_parameter_invalid_range.   " Parameter with Invalid Range
*  catch cx_sy_buffer_overflow.        " System Exception: Buffer too Short
*  catch cx_sy_conversion_codepage.    " System Exception Converting Character Set
*  catch cx_sy_compression_error.      " System Exception: Compression Error

  perform describe_buffer using lv_x_gzip.

form describe_buffer using in_buffer type xstring.
  data: lr_another_obj type ref to z_ser_data,
        lv_str         type string.

* Decompress the buffer back into the XML string
  cl_abap_gzip=>decompress_text(
    exporting
      gzip_in      = in_buffer    " Input of Zipped Data
*     gzip_in_len  = -1           " Input Length
*     conversion   = 'DEFAULT'    " Conversion to UTF8 (UC)
    importing
      text_out     = lv_str       " Decompressed Output
*     text_out_len =              " Output Length
  ).
*    catch cx_parameter_invalid_range.   " Parameter with Invalid Range
*    catch cx_sy_buffer_overflow.        " System Exception: Buffer too Short
*    catch cx_sy_conversion_codepage.    " System Exception Converting Character Set
*    catch cx_sy_compression_error.      " System Exception: Compression Error

* Deserialize the XML back into a new object instance
  call transformation id
    source xml lv_str
    result model = lr_another_obj.

  write: 'Received val:', lr_another_obj->member.
  new-line.
  lr_another_obj->change_val( new_val = 'New val' ).
  write: 'Changed val:', lr_another_obj->member.
  new-line.
endform.
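As mentioned above, here is a small additional sketch of the nested-object case. It reuses the z_ser_data class from the example; the wrapper class z_ser_parent and its attribute child are assumptions added purely for illustration.

class z_ser_parent definition.
  public section.
    interfaces: if_serializable_object.
    data: child type ref to z_ser_data.  " nested serializable object
endclass.

* ... in the main program flow ...
data: lr_parent  type ref to z_ser_parent,
      lr_parent2 type ref to z_ser_parent,
      lv_xml     type string.

create object lr_parent.
create object lr_parent->child
  exporting
    in_val = 'nested value'.

* The "id" transformation serializes the parent together with the nested object ...
call transformation id
  source model = lr_parent
  result xml lv_xml.

* ... and recreates both objects with the reference wired up again
call transformation id
  source xml lv_xml
  result model = lr_parent2.

write: lr_parent2->child->member.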

The last runtime buffer you'll ever need?


Hi SCN community!

 

It's me again, with another contribution to Project Object.

 

Have you ever been in a situation where you keep requesting the same thing from the database over and over again?

 

And if you're a good developer, you avoided those repetitive calls to the database by implementing a buffer, correct?

 

Well, what I've got for you today is a class that will serve as a buffer for everything you want! Everything? Everything!

 

 

I can't take full credit for this though... I got it from a guy who got it from another guy... so I have no idea who the actual developer of this thing was. I can take credit for "perfecting" it though, and for implementing some exception classes in it. So at least that.

 

You'll be able to find it, in nugget and text versions, in my GitHub, in the utilities section:

GitHub

 

 

Use example

 

Below is just an example of how to use this class. I am fully aware that the first "loop" is not how someone would properly perform this particular select against the database; it is meant simply as an example of how to use this class and what for.

 

 

 

DATA:
      db_counter TYPE i,
      lt_sbook  TYPE TABLE OF sbook,
      ls_sbook  LIKE LINE OF lt_sbook,
      ls_sbuspart TYPE sbuspart.

SELECT * FROM sbook
  INTO TABLE lt_sbook.

BREAK-POINT.

CLEAR db_counter.

LOOP AT lt_sbook INTO ls_sbook.

  SELECT SINGLE * FROM sbuspart
    INTO ls_sbuspart
    WHERE buspartnum = ls_sbook-customid.
  ADD 1 TO db_counter.

ENDLOOP.

"check db_counter
BREAK-POINT.

CLEAR db_counter.

LOOP AT lt_sbook INTO ls_sbook.

  TRY.

      CALL METHOD zcl_buffer=>get_value
        EXPORTING
          i_name = 'CUSTOMER_DETAILS'
          i_key  = ls_sbook-customid.
    CATCH zcx_buffer_value_not_found.

      "If we haven't saved it yet, get it and save it

      SELECT SINGLE * FROM sbuspart
        INTO ls_sbuspart
        WHERE buspartnum = ls_sbook-customid.
      ADD 1 TO db_counter.

      CALL METHOD zcl_buffer=>save_value
        EXPORTING
          i_name  = 'CUSTOMER_DETAILS'
          i_key   = ls_sbook-customid
          i_value = ls_sbuspart.

  ENDTRY.

ENDLOOP.

"check db_counter
BREAK-POINT.

 

Performance remark

 

One last remark that I should make though... due to the high flexibility of this buffer, I think it's not possible to have a sorted read (or, in other words, a fast read) of the value in the buffer. Therefore, if you are using the buffer with a high volume of entries and performance is critical, you should create a subclass, redefine the "key" with the particular type you are interested in, and also redefine the get method so that the "LOOP" statement is replaced by a "READ" statement. A rough sketch of that idea follows.
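The ZCL_BUFFER internals are not shown here, so rather than pretending to redefine them, the sketch below only illustrates the keyed-read idea with a small, standalone, typed buffer for SBUSPART; all names are illustrative assumptions, not the class from the nugget.

CLASS lcl_sbuspart_buffer DEFINITION.
  PUBLIC SECTION.
    METHODS get
      IMPORTING i_buspartnum     TYPE sbuspart-buspartnum
      RETURNING VALUE(rs_result) TYPE sbuspart.
  PRIVATE SECTION.
    " Typed, sorted cache table -> keyed reads instead of a LOOP
    DATA mt_cache TYPE SORTED TABLE OF sbuspart
                  WITH UNIQUE KEY buspartnum.
ENDCLASS.

CLASS lcl_sbuspart_buffer IMPLEMENTATION.
  METHOD get.
    READ TABLE mt_cache INTO rs_result
         WITH TABLE KEY buspartnum = i_buspartnum.
    IF sy-subrc <> 0.
      " Not cached yet: read from the database and remember the result
      SELECT SINGLE * FROM sbuspart
        INTO rs_result
        WHERE buspartnum = i_buspartnum.
      IF sy-subrc = 0.
        INSERT rs_result INTO TABLE mt_cache.
      ENDIF.
    ENDIF.
  ENDMETHOD.
ENDCLASS.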

 

All the best!

Bruno

How hard it is to be an expert


 

 

When I saw this video I said: "Hey!! That's my life! My daily job!"

 

 

Fred

How to create the only buffer you'll ever need...


Introduction

 

This blog has been inspired by Bruno Esperança and his thought-provoking The last runtime buffer you'll ever need? The thing is, I wrote such a thing a few years ago, and it's widely used by one of my clients. There are a few areas where it could be improved.

 

The Interface

 

interface zif_lookup

   public .

     constants c_dateto type fieldname value 'DATETO'. "#EC NOTEXT

 

   methods lookup

     exporting

       es_data type any

       eo_type type ref to cl_abap_structdescr

       e_notfound_flag type char1

     exceptions

       sql_error .


   methods set_key_val

     importing

       i_component type clike

       i_value type any .


   methods set_val_component

     importing

       i_component type clike .


   methods get_ref2_lookup

     returning

       value(rp_data) type ref to data

     exceptions

       sql_error .


   methods set_tim_val

     importing

       i_component type clike optional

       i_value type any .


   methods set_keyval_range

     importing

       it_keytab type any table .


   methods get_val_struc

     returning

       value(ro_valstruc) type ref to cl_abap_structdescr .


   methods get_notfound_flag

     returning

       value(r_notfound_flag) type flag .


endinterface.


Buffering in use


Well, the constructor is missing, so just showing you the interface doesn't really help! So, imagine a class with this attribute.

 

DATA: buffer TYPE REF TO zif_lookup.

 

Then we have a method in some class that reads material data from the MARC table, importing i_matnr and i_werks and exporting e_bwtty and e_mmsta.

 

IF buffer IS NOT bound.

  CREATE OBJECT buffer TYPE zcl_table_lookup EXPORTING

     i_table_name = 'MARC'

     i_whole_tab = abap_false.

 

" The data I want to get back

  buffer->set_val_component( 'BWTTY' ).

  buffer->set_val_component( 'MMSTA' ).

ENDIF.

 

" The key data

buffer->set_key_val( i_component = 'MATNR' i_value = i_matnr ).

buffer->set_key_val( i_component = 'WERKS' i_value = i_werks ).

 

" Now look it up.

DATA: BEGIN OF looked_up,

bwtty TYPE bwtty_d,

mmsta TYPE mmsta,

END OF looked_up.

 

buffer->lookup( IMPORTING es_data = looked_up ).

e_bwtty = looked_up-bwtty.

e_mmsta = looked_up-mmsta.

 

What the class ZCL_TABLE_LOOKUP does is take the components supplied through set_val_component and set_key_val and construct two hashed tables (using RTTS) with the key components (in this case MATNR and WERKS) as the key. The first contains the data that has been looked up; the second contains the keys that were looked up and not found. A small sketch of the RTTS part is shown right below.
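Since the ZCL_TABLE_LOOKUP source is not included in this post, the following is only a rough, hedged sketch of how such a hashed buffer table could be built dynamically with RTTS; the variable names are illustrative.

* Build a hashed table of MARC lines keyed by MATNR and WERKS at runtime
DATA: lo_struct TYPE REF TO cl_abap_structdescr,
      lo_table  TYPE REF TO cl_abap_tabledescr,
      lt_keys   TYPE abap_keydescr_tab,
      ls_key    TYPE abap_keydescr,
      lr_buffer TYPE REF TO data.

FIELD-SYMBOLS <lt_buffer> TYPE HASHED TABLE.

* Line type of the buffer = the structure of the looked-up table
lo_struct ?= cl_abap_typedescr=>describe_by_name( 'MARC' ).

* Key components as supplied via set_key_val
ls_key-name = 'MATNR'.
APPEND ls_key TO lt_keys.
ls_key-name = 'WERKS'.
APPEND ls_key TO lt_keys.

lo_table = cl_abap_tabledescr=>create(
             p_line_type  = lo_struct
             p_table_kind = cl_abap_tabledescr=>tablekind_hashed
             p_unique     = abap_true
             p_key        = lt_keys
             p_key_kind   = cl_abap_tabledescr=>keydefkind_user ).

CREATE DATA lr_buffer TYPE HANDLE lo_table.
ASSIGN lr_buffer->* TO <lt_buffer>.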

 

After the first lookup, you can't change the component fields.

 

If the i_whole_tab parameter is set, then the whole table will be buffered, rather than doing line by line buffering.

 

A couple of other implementations

 

I created a BW-style lookup that uses the table lookup above and the same interface. It specialises into two further classes - one for looking up InfoObject master data, the other for reading from a DSO.

 

Their constructors take an InfoObject name / DSO name, convert it to the underlying transparent table, and instantiate a table lookup instance for that table. Oh - and for InfoObjects there's a flag on the constructor for whether the data is time dependent.

 

The component setting methods similarly take InfoObject names (instead of field names). The InfoObject names are then converted to field names, and passed to the table lookup instance.

 

What next?

 

I bet you want to know the details of the implementation. I will update this blog shortly with that information, but thought I'd give you something to whet your appetites. In any case, it's better this way than supplying the information as comments to Bruno's blog!

Caching with Decorator Pattern in ABAP


Hey SCN,

 

I was reading Bruno Esperança's post (The last runtime buffer you'll ever need?) yesterday and it inspired me to think about the way I cache data in my own classes. I got to googling and found a nice blog about Caching with Decorator pattern, and I thought I might give it a try in ABAP. I think the pattern works nicely for caching, and as the author of Caching with Decorator pattern says:

I think this is a good way of applying caching, logging or any other things that you want to do before or after hitting your database.  It leaves your existing system in place and does not pollute your pure data access code (repositories) with other concerns.  In this case both classes have their own responsibilities, when it’s not in the cache the decorator class delegates the task to the repository and let it deal with the database.  Do I hear Single Responsibility Principal -


So let's jump right into it. I just used the normal SAP example - SBOOK. For our fictitious program we just need to be able to select a single entry from SBOOK, and we happen to know all the key fields.

 

I started with an interface:

INTERFACE ZIF_SBOOK_DB

PUBLIC.

METHODS:

     FIND_BY_KEY IMPORTING CARRID TYPE S_CARR_ID

                           CONNID TYPE S_CONN_ID

                           FLDATE TYPE S_DATE

                           BOOKID TYPE S_BOOK_ID

                 RETURNING VALUE(RS_SBOOK) TYPE SBOOK.

ENDINTERFACE.

 

Then I created the basic implementation - selecting from the database directly:

CLASS ZCL_SBOOK_DB_IMPL DEFINITION

PUBLIC

CREATE PUBLIC .

 

PUBLIC SECTION.

   INTERFACES: ZIF_SBOOK_DB.

PROTECTED SECTION.

PRIVATE SECTION.

ENDCLASS.

 

CLASS ZCL_SBOOK_DB_IMPL IMPLEMENTATION.

METHOD ZIF_SBOOK_DB~FIND_BY_KEY.

   SELECT SINGLE *

     INTO RS_SBOOK

     FROM SBOOK

     WHERE CARRID = CARRID

       AND CONNID = CONNID

       AND FLDATE = FLDATE

       AND BOOKID = BOOKID.

ENDMETHOD.

ENDCLASS.

 

Now we could just stop there... We have a perfectly good database layer and it meets the requirements of whatever fictitious program we are creating. Let's assume we have some performance problems, or maybe we just noticed in ST05 that the same query is being executed multiple times. This is where the decorator pattern comes into play:

CLASS ZCL_SBOOK_DB_CACHE_DECORATOR DEFINITION

PUBLIC

FINAL

CREATE PUBLIC

INHERITING FROM ZCL_SBOOK_DB_IMPL.

 

PUBLIC SECTION.

   METHODS: ZIF_SBOOK_DB~FIND_BY_KEY REDEFINITION.

PROTECTED SECTION.

PRIVATE SECTION.

   DATA: _CACHE TYPE HASHED TABLE OF SBOOK WITH UNIQUE KEY CARRID CONNID FLDATE BOOKID.

ENDCLASS.

 

CLASS ZCL_SBOOK_DB_CACHE_DECORATOR IMPLEMENTATION.

METHOD ZIF_SBOOK_DB~FIND_BY_KEY.

    READ TABLE _CACHE INTO RS_SBOOK WITH KEY CARRID = CARRID CONNID = CONNID FLDATE = FLDATE BOOKID = BOOKID.

   IF SY-SUBRC NE 0.

      RS_SBOOK = SUPER->ZIF_SBOOK_DB~FIND_BY_KEY( CARRID = CARRID CONNID = CONNID FLDATE = FLDATE BOOKID = BOOKID ).

     INSERT RS_SBOOK INTO TABLE _CACHE.

   ENDIF.

ENDMETHOD.

ENDCLASS.

 

I think this is pretty easy to understand. We have defined a class that inherits from our basic implementation. It checks a private attribute (the cache) to see if it already has the item you need. If it doesn't have it, then it delegates to the super class - our basic implementation - which queries the database, and then it puts the result into the cache.

 

I see a couple of advantages in using the decorator pattern in this way to implement caching:

  • The buffering technique is not coupled to the implementation of the database layer. If I wanted to use shared memory objects instead of a private attribute that change would be easy to implement and I could be confident that it would not impact my existing database layer.
  • I can easily decide in any program I write whether or not I want to utilize the buffer. To buffer, I instantiate an instance of zcl_sbook_db_cache_decorator; to ensure I always go directly to the database, I instantiate an instance of zcl_sbook_db_impl (see the short snippet after this list).
  • I can add buffering to any existing database layer classes I may have already written without touching the existing (and proven!) code in those classes just by sub-classing them.
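Since both classes are reached through the same interface, the choice really is a one-line difference at instantiation time; a minimal sketch (the variable name lo_db is illustrative):

DATA: lo_db      TYPE REF TO zif_sbook_db,
      ls_booking TYPE sbook.

* With caching: the decorator checks its private cache before hitting the DB
CREATE OBJECT lo_db TYPE zcl_sbook_db_cache_decorator.

* Without caching, this line would be used instead:
* CREATE OBJECT lo_db TYPE zcl_sbook_db_impl.

ls_booking = lo_db->find_by_key( carrid = 'AA'
                                 connid = '0017'
                                 fldate = '20121031'
                                 bookid = '00000023' ).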

 

Finally, I decided I'd better test the performance. I was pretty confident that the cache would be faster, but I guess you never know:

REPORT Z_TEST_SBOOK_DB_LAYER.

 

DATA: T1 TYPE I,

     T2 TYPE I,

     TDIFF TYPE I.

 

DATA: LV_CARRID TYPE S_CARR_ID VALUE 'AA',

     LV_CONNID TYPE S_CONN_ID VALUE '17',

     LV_FLDATE TYPE S_DATE VALUE '20121031',

     LV_BOOKID TYPE S_BOOK_ID VALUE '23'.

 

DATA: LO_SBOOK_CACHE TYPE REF TO ZIF_SBOOK_DB.

CREATE OBJECT LO_SBOOK_CACHE TYPE ZCL_SBOOK_DB_CACHE_DECORATOR.

 

WRITE: /'First read from the cache decorator will be from the database.'.

SET RUN TIME CLOCK RESOLUTION HIGH.

GET RUN TIME FIELD T1.

 

LO_SBOOK_CACHE->FIND_BY_KEY( CARRID = LV_CARRID

                             CONNID = LV_CONNID

                             FLDATE = LV_FLDATE

                             BOOKID = LV_BOOKID ).

 

GET RUN TIME FIELD T2.

TDIFF = ( T2 - T1 ).

WRITE: /'It took ', TDIFF, ' microseconds to read from the database.'.

 

WRITE: /'Second read from the cache decorator will be from the cache.'.

GET RUN TIME FIELD T1.

 

LO_SBOOK_CACHE->FIND_BY_KEY( CARRID = LV_CARRID

                             CONNID = LV_CONNID

                             FLDATE = LV_FLDATE

                             BOOKID = LV_BOOKID ).

 

GET RUN TIME FIELD T2.

TDIFF = ( T2 - T1 ).

WRITE: /'It took ', TDIFF, ' microseconds to read from the cache.'.

And here are the results.

(Screenshot: runtime output comparing the database read with the cached read.)

 

So as you can see, it's a bit of an improvement. I hope you find this useful in your own development!

Search Remote Function Module (RFC) calls



Sometimes we need to search for the usage of RFCs, but the Where-Used list is only available if the function module exists in the caller system and/or client.


One solution is to use the ABAP source scan technique with the reports available for your SAP platform and/or version. Depending on your search criteria this can take a long processing time and consume system resources.

 

To make Where-Used possible anyway, I built this tool using existing Repository Information System functions to find calls to remote function modules that do not exist locally. Try it - it is very fast and supports direct navigation to the object code.

 

 

How to implement:

  1. Create an executable report in your SAP development system with the name ZNM_FIND_RFC and the description "Search RFCs calls", using transaction SE38;
  2. Copy-paste the code below and activate the program;
  3. Create/fill the corresponding text symbols and selection texts. Please check their meaning in the comments at the top of the program.


If you are not able to activate the program because some of the standard objects do not exist in your release, please downgrade the code for earlier SAP versions. If you need help with that, please keep me informed.


 

Copy-paste the code below or download the attached file:


REPORT znm_find_rfc MESSAGE-ID 00.

*----------------------------------------------------------------------*

* Created by NM to Search RFCs calls

*----------------------------------------------------------------------*

* Text Symbols

* A01 Description

* A02 Desc.

* A03 Sub Object

* B01 RFC Selection

* C04 Rotine

* M01 Not found

* M02 Error displaying result

*

* Selection Texts

* P_RFC Function

*

*----------------------------------------------------------------------*

* GLOBAL DATA

*----------------------------------------------------------------------*

TYPE-POOLS abap. "Only for old versions

 

*----------------------------------------------------------- Variables *

DATA gt_founds TYPE TABLE OF rsfindlst.                     "#EC NEEDED

 

*----------------------------------------------------------------------*

* CLASS gcl_handle_events DEFINITION

*----------------------------------------------------------------------*

CLASS gcl_handle_events DEFINITION FINAL.

 

  PUBLIC SECTION.

    METHODS on_double_click FOR EVENT double_click OF cl_salv_events_table

      IMPORTING row column.                                 "#EC NEEDED

 

ENDCLASS.                    "lcl_handle_events DEFINITION

 

*----------------------------------------------------------------------*

* SELECTION SCREEN

*----------------------------------------------------------------------*

*------------------------------------------------- RFC Function Module *

SELECTION-SCREEN BEGIN OF BLOCK b01 WITH FRAME TITLE text-b01.

SELECTION-SCREEN SKIP 1.

PARAMETERS p_rfc TYPE rs38l_fnam OBLIGATORY. "Name of Function Module

SELECTION-SCREEN SKIP 1.

SELECTION-SCREEN END OF BLOCK b01.

 

*----------------------------------------------------------------------*

* REPORT EVENTS

*----------------------------------------------------------------------*

START-OF-SELECTION.

  PERFORM search_rfc.

 

END-OF-SELECTION.

  PERFORM display_results.

 

*----------------------------------------------------------------------*

* CLASS lcl_handle_events IMPLEMENTATION

*----------------------------------------------------------------------*

CLASS gcl_handle_events IMPLEMENTATION.

 

*---------- Row dbclick ----------*

  METHOD on_double_click.

 

    DATA:

      ls_founds  LIKE LINE OF gt_founds,  "Found object

      lt_report  TYPE TABLE OF string,    "Report source code

      lt_results TYPE match_result_tab,   "Match results

      ls_results TYPE match_result.

 

*---------- Get selected line  ----------*

    READ TABLE gt_founds INTO ls_founds INDEX row.

    IF sy-subrc IS INITIAL.

 

*---------- Find position  ----------*

      READ REPORT ls_founds-object INTO lt_report.

      FIND p_rfc IN TABLE lt_report RESULTS lt_results.

      READ TABLE lt_results INTO ls_results INDEX 1.

 

*---------- Display objects ----------*

      CALL FUNCTION 'RS_TOOL_ACCESS'                        "#EC FB_RC

        EXPORTING

          operation           = 'SHOW'

          object_name         = ls_founds-object

          object_type         = 'PROG'

          position            = ls_results-line

        EXCEPTIONS

          not_executed        = 1

          invalid_object_type = 2

          OTHERS              = 3.

    ENDIF.

 

  ENDMETHOD.                    "on_double_click

 

ENDCLASS.                    "lcl_handle_events IMPLEMENTATION

 

*----------------------------------------------------------------------*

* FORMS

*----------------------------------------------------------------------*

 

*&---------------------------------------------------------------------*

*&      Form  SEARCH_RFC

*&---------------------------------------------------------------------*

FORM search_rfc .

 

  CONSTANTS lc_obj_type TYPE seu_obj VALUE 'FF'.

 

  DATA:

    lt_findstring TYPE TABLE OF rsfind,

    ls_findstring LIKE LINE OF lt_findstring.

 

  ls_findstring-object = p_rfc.

  APPEND ls_findstring TO lt_findstring.

 

  REFRESH gt_founds.

  CALL FUNCTION 'RS_EU_CROSSREF'                            "#EC FB_RC

    EXPORTING

      i_find_obj_cls           = lc_obj_type

      no_dialog                = abap_true

    TABLES

      i_findstrings            = lt_findstring

      o_founds                 = gt_founds

    EXCEPTIONS

      not_executed             = 1

      not_found                = 2

      illegal_object           = 3

      no_cross_for_this_object = 4

      batch                    = 5

      batchjob_error           = 6

      wrong_type               = 7

      object_not_exist         = 8

      OTHERS                   = 9.

 

  IF gt_founds IS INITIAL.

    MESSAGE s398 WITH text-m01 space space space DISPLAY LIKE 'W'.  "Not found

  ENDIF.

 

ENDFORM.                    " SEARCH_RFC

 

*&---------------------------------------------------------------------*

*&      Form  DISPLAY_RESULTS

*&---------------------------------------------------------------------*

FORM display_results .

 

  DATA:

    lo_results       TYPE REF TO cl_salv_table,             "ALV

    lr_functions     TYPE REF TO cl_salv_functions_list,    "ALV Functions

    lr_events        TYPE REF TO cl_salv_events_table,      "ALV Events

    lr_display       TYPE REF TO cl_salv_display_settings,  "ALV Output Appearance

    lr_columns       TYPE REF TO cl_salv_columns_table,     "ALV Columns

    lr_column        TYPE REF TO cl_salv_column_table,

    lr_selections    TYPE REF TO cl_salv_selections,        "ALV Selections

    lo_event_handler TYPE REF TO gcl_handle_events.         "ALV Events Handler

 

  DATA:

    lt_column_ref TYPE salv_t_column_ref, "Columns of ALV List

    ls_column_ref TYPE salv_s_column_ref.

 

  IF gt_founds IS NOT INITIAL.

    TRY.

*---------- Create ALV ----------*

        cl_salv_table=>factory( IMPORTING r_salv_table = lo_results CHANGING t_table = gt_founds ).

 

*---------- Set ALV selections ----------*

        lr_selections = lo_results->get_selections( ).

        lr_selections->set_selection_mode( if_salv_c_selection_mode=>single ).

 

*---------- Set ALV Display and Title ----------*

        lr_display = lo_results->get_display_settings( ).

        lr_display->set_striped_pattern( if_salv_c_bool_sap=>true ).

 

*---------- Set Functions ----------*

        lr_functions = lo_results->get_functions( ).

        lr_functions->set_export_localfile( ).

        lr_functions->set_filter( ).

        lr_functions->set_print( ).

        lr_functions->set_sort_asc( ).

        lr_functions->set_sort_desc( ).

        lr_functions->set_find( ).

        lr_functions->set_detail( ).

 

*---------- Set ALV Columns ----------*

        lr_columns = lo_results->get_columns( ).

        lr_columns->set_key_fixation( ).

        lr_columns->set_optimize( ).

        lt_column_ref = lr_columns->get( ).

 

        lr_columns->set_column_position( columnname = 'ENCL_OBJEC' position = 1 ).

        lr_columns->set_column_position( columnname = 'TEXTLINE'   position = 4 ).

 

        LOOP AT lt_column_ref INTO ls_column_ref. "Default format for all columns

          lr_column ?= lr_columns->get_column( ls_column_ref-columnname ).

          lr_column->set_f4( if_salv_c_bool_sap=>false ).

          lr_column->set_alignment( if_salv_c_alignment=>left ).

          lr_column->set_visible( if_salv_c_bool_sap=>false ).

          lr_column->set_technical( if_salv_c_bool_sap=>true ).

 

          IF ls_column_ref-columnname = 'ENCL_OBJEC' OR ls_column_ref-columnname = 'OBJECT' OR

             ls_column_ref-columnname = 'PROGRAM'.

 

            CASE ls_column_ref-columnname.

              WHEN 'OBJECT'.  "Sub Object

                lr_column->set_long_text( text-a03 ).

                lr_column->set_medium_text( text-a03 ).

                lr_column->set_short_text( text-a03 ).

              WHEN 'PROGRAM'. "Rotine

                lr_column->set_long_text( text-c04 ).

                lr_column->set_medium_text( text-c04 ).

                lr_column->set_short_text( text-c04 ).

            ENDCASE.

 

            lr_column->set_key( if_salv_c_bool_sap=>true ).

            lr_column->set_visible( if_salv_c_bool_sap=>true ).

            lr_column->set_technical( if_salv_c_bool_sap=>false ).

          ENDIF.

 

          IF ls_column_ref-columnname = 'OBJECT_CLS'.

            lr_column->set_key( if_salv_c_bool_sap=>true ).

            lr_column->set_visible( if_salv_c_bool_sap=>true ).

            lr_column->set_alignment( if_salv_c_alignment=>centered ).

            lr_column->set_technical( if_salv_c_bool_sap=>false ).

          ENDIF.

 

          IF ls_column_ref-columnname = 'TEXTLINE'. "Description

            lr_column->set_long_text( text-a01 ).

            lr_column->set_medium_text( text-a01 ).

            lr_column->set_short_text( text-a02 ).

 

            lr_column->set_visible( if_salv_c_bool_sap=>true ).

            lr_column->set_alignment( if_salv_c_alignment=>left ).

            lr_column->set_technical( if_salv_c_bool_sap=>false ).

          ENDIF.

        ENDLOOP.

 

*---------- Register ALV Events ----------*

        lr_events = lo_results->get_event( ).

        CREATE OBJECT lo_event_handler.

        SET HANDLER lo_event_handler->on_double_click FOR lr_events.

 

*---------- Display Objects ALV ----------*

        lo_results->display( ).

 

      CATCH cx_root.                                     "#EC CATCH_ALL

        MESSAGE s398 WITH text-m02 space space space DISPLAY LIKE 'E'.  "Error displaying result

    ENDTRY.

  ENDIF.

 

ENDFORM.                    " DISPLAY_RESULTS

 

 

Selection screen layout after implementation:

(Screenshot: selection screen with the RFC function module parameter.)

Result ALV layout:

(Screenshot: result ALV list of the objects where the RFC is called.)

 

How to use:

ZNM_FIND_RFC is a tool to be used only for remote function modules that do not exist in the caller system. Please use the standard Where-Used functionality in all other situations.

  1. Just fill in the RFC name and execute;
  2. Check the result objects and double-click a row to navigate to the source code where the RFC is being used.

 

 

Nuno Morais

WebWork 2014

