Thursday, February 27, 2020

Common Oracle DBA Tasks on Amazon RDS | In One Page!

In Amazon RDS you are not allowed to execute the following commands:
- ALTER DATABASE
- ALTER SYSTEM
- GRANT ANY ROLE/PRIVILEGE
- DROP/CREATE ANY DIRECTORY 
 
This is because AWS doesn't want you to mess with the DB instance they manage; instead, they provide packages under the rdsadmin schema to help you execute common DBA tasks without needing the above-mentioned commands.
 
In this post I'll explain all the common admin commands that you will use frequently when managing Oracle on RDS.

Before I start, please note that alongside the new RDS-specific commands I've also included the corresponding native Oracle commands, just to make this article coherent and handy for you.

Sessions Management:

-- Check the active Sessions:
SQL> select
substr(s.INST_ID||'|'||s.USERNAME||'| '||s.sid||','||s.serial#||' |'||substr(s.MACHINE,1,22)||'|'||substr(s.MODULE,1,18),1,69)"INS|USER|SID,SER|MACHIN|MODUL"
,substr(s.status||'|'||round(w.WAIT_TIME_MICRO/1000000)||'|'||LAST_CALL_ET||'|'||to_char(LOGON_TIME,'ddMon HH24:MI'),1,40) "ST|WAITD|ACT_SINC|LOGIN"
,substr(w.event,1,24) "EVENT"
,s.SQL_ID||'|'||round(w.TIME_REMAINING_MICRO/1000000) "CURRENT SQL|REMIN_SEC"
,s.FINAL_BLOCKING_INSTANCE||'|'||s.FINAL_BLOCKING_SESSION "I|BLK_BY"
from     gv$session s, gv$session_wait w where s.USERNAME is not null and s.sid=w.sid and s.STATUS='ACTIVE'
--and    w.EVENT NOT IN ('SQL*Net message from client','class slave wait','Streams AQ: waiting for messages in the queue','Streams capture: waiting for archive log','Streams AQ: waiting for time management or cleanup tasks','PL/SQL lock timer','rdbms ipc message')
order by "I|BLK_BY" desc,w.event,"INS|USER|SID,SER|MACHIN|MODUL","ST|WAITD|ACT_SINC|LOGIN" desc,"CURRENT SQL|REMIN_SEC";


-- Disconnect a session:
SQL> EXEC rdsadmin.rdsadmin_util.disconnect(<SID>, <SERIAL#>, 'IMMEDIATE');

-- Cancel Statement: [18c+]
 SQL> EXEC rdsadmin.rdsadmin_util.cancel(<SID>, <SERIAL#>, '<SQLID>');
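
To find the SID, SERIAL# and SQL_ID values to pass to the above calls, you can pull them from gv$session; a quick native example (APP_USER is a placeholder username):

SQL> select inst_id, sid, serial#, sql_id, status
     from gv$session
     where username = 'APP_USER' and status = 'ACTIVE';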

-- Enable Restricted Sessions:
SQL> EXEC rdsadmin.rdsadmin_util.restricted_session(p_enable => true);

-- Verify: [the LOGINS column will show RESTRICTED]
SQL> select logins from v$instance;


-- Disable Restricted Sessions:
SQL> EXEC rdsadmin.rdsadmin_util.restricted_session(p_enable => false);

-- List the MASTER BLOCKING Sessions:
SQL> select /*+RULE*/
substr(s.INST_ID||'|'||s.OSUSER||'/'||s.USERNAME||'| '||s.sid||','||s.serial#||' |'||substr(s.MACHINE,1,22)||'|'||substr(s.MODULE,1,18),1,75)"I|OS/DB USER|SID,SER|MACHN|MOD"
,substr(s.status||'|'||round(w.WAIT_TIME_MICRO/1000000)||'|'||LAST_CALL_ET||'|'||to_char(LOGON_TIME,'ddMon HH24:MI'),1,34) "ST|WAITD|ACT_SINC|LOGIN"
,substr(w.event,1,24) "EVENT"
,s.PREV_SQL_ID||'|'||s.SQL_ID||'|'||round(w.TIME_REMAINING_MICRO/1000000) "PREV|CURRENT_SQL|REMAIN_SEC"
from    gv$session s, gv$session_wait w
where    s.sid in (select distinct FINAL_BLOCKING_SESSION from gv$session where FINAL_BLOCKING_SESSION is not null)
and       s.USERNAME is not null
and     s.sid=w.sid
and    s.FINAL_BLOCKING_SESSION is null;


-- KILL the MASTER BLOCKING SESSION: [I suppose you already know what you are doing :-)]
SQL> col "KILL MASTER BLOCKING SESSION"    for a75
select /*+RULE*/ 'EXEC rdsadmin.rdsadmin_util.disconnect( '||s.sid||','||s.serial#|| ',''IMMEDIATE'');' "KILL MASTER BLOCKING SESSION"
from     gv$session s
where    s.sid in (select distinct FINAL_BLOCKING_SESSION from gv$session where FINAL_BLOCKING_SESSION is not null)
and       s.USERNAME is not null and s.FINAL_BLOCKING_SESSION is null;


-- List of Victim LOCKED Sessions:
SQL> select /*+RULE*/
substr(s.INST_ID||'|'||s.USERNAME||'| '||s.sid||','||s.serial#||' |'||substr(s.MACHINE,1,22)||'|'||substr(s.MODULE,1,18),1,65)"INS|USER|SID,SER|MACHIN|MODUL"
,substr(w.state||'|'||round(w.WAIT_TIME_MICRO/1000000)||'|'||LAST_CALL_ET||'|'||to_char(LOGON_TIME,'ddMon'),1,38) "WA_ST|WAITD|ACT_SINC|LOG_T"
,substr(w.event,1,24) "EVENT"
,s.FINAL_BLOCKING_INSTANCE||'|'||s.FINAL_BLOCKING_SESSION "I|BLKD_BY"
from    gv$session s, gv$session_wait w
where   s.USERNAME is not null
and     s.FINAL_BLOCKING_SESSION is not null
and     s.sid=w.sid
and     s.STATUS='ACTIVE'
--and     w.EVENT NOT IN ('SQL*Net message from client','class slave wait','Streams AQ: waiting for messages in the queue','Streams capture: waiting for archive log'
--        ,'Streams AQ: waiting for time management or cleanup tasks','PL/SQL lock timer','rdbms ipc message')
order by "I|BLKD_BY" desc,w.event,"INS|USER|SID,SER|MACHIN|MODUL","WA_ST|WAITD|ACT_SINC|LOG_T" desc;


-- Lock Chain Analysis:
SQL> select /*+RULE*/ 'User: '||s1.username || ' | ' || s1.module || '(SID=' || s1.sid ||' ) running SQL_ID:'||s1.sql_id||'  is blocking
User: '|| s2.username || ' | ' || s2.module || '(SID=' || s2.sid || ') running SQL_ID:'||s2.sql_id||' For '||round(s2.WAIT_TIME_MICRO/1000000/60,0)||' Minutes' AS blocking_status
from gv$LOCK l1, gv$SESSION s1, gv$LOCK l2, gv$SESSION s2
 where s1.sid=l1.sid and s2.sid=l2.sid
 and l1.BLOCK=1 and l2.request > 0
 and l1.id1 = l2.id1
 and l1.id2 = l2.id2
 order by s2.WAIT_TIME_MICRO desc;


-- Blocking Locks On Object Level:
SQL> select  /*+RULE*/ OWNER||'.'||OBJECT_NAME "LOCKED_OBJECT", ORACLE_USERNAME||' | '||lo.OS_USER_NAME "LOCK HOLDER: DB_USER | OS_USER",
l.sid||' | '|| lo.PROCESS "DB_SID | OS_PID",decode(TYPE,'MR', 'Media Recovery',
'RT', 'Redo Thread','UN', 'User Name','TX', 'Transaction','TM', 'DML','UL', 'PL/SQL User Lock','DX', 'Distributed Xaction','CF',
'Control File','IS', 'Instance State','FS', 'File Set','IR', 'Instance Recovery','ST', 'Disk Space Transaction','TS', 'Temp Segment'
,'IV', 'Library Cache Invalidation','LS', 'Log Start or Switch','RW', 'Row Wait','SQ', 'Sequence Number','TE', 'Extend Table','TT',
'Temp Table', type)||' | '||
decode(LMODE, 0, 'None',1, 'Null',2, 'row share lock',3, 'row exclusive lock',4, 'Share',5, '(SSX)exclusive lock',6, 'Exclusive', lmode) lock_type,
l.CTIME LOCK_HELD_SEC,
decode(REQUEST,0, 'None',1, 'Null',2, 'row share lock',3, 'row exclusive lock',4, 'Share',5, '(SSX)exclusive lock',6, 'Exclusive', request) lock_requested,
decode(BLOCK,0, 'Not Blocking',1, 'Blocking',2, 'Global', block)
status from    v$locked_object lo, dba_objects do, v$lock l
where   lo.OBJECT_ID = do.OBJECT_ID AND     l.SID = lo.SESSION_ID AND l.BLOCK='1'
order by OWNER,OBJECT_NAME;


-- Long Running Operations:
SQL> select USERNAME||'| '||SID||','||SERIAL# "USERNAME| SID,SERIAL#",SQL_ID,round(SOFAR/TOTALWORK*100,2) "%DONE"
        ,to_char(START_TIME,'DD-Mon HH24:MI')||'| '||trunc(ELAPSED_SECONDS/60)||'|'||trunc(TIME_REMAINING/60) "STARTED|MIN_ELAPSED|REMAIN" ,MESSAGE
        from v$session_longops
    where TOTALWORK <> 0 and SOFAR/TOTALWORK*100 <> 100
        order by "STARTED|MIN_ELAPSED|REMAIN" desc, "USERNAME| SID,SERIAL#";



Users Management:

-- Grant permission on SYS tables/views: [With GRANT option]
SQL> EXEC rdsadmin.rdsadmin_util.grant_sys_object(p_obj_name  => 'V_$SESSION', p_grantee => 'USER1', p_privilege => 'SELECT', p_grant_option => true);

-- REVOKE permission from a user on SYS object:
SQL> EXEC rdsadmin.rdsadmin_util.revoke_sys_object(p_obj_name  => 'V_$SESSION', p_revokee  => 'USER1', p_privilege => 'SELECT');
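
To confirm what is currently granted, you can query the data dictionary; a quick native check (using USER1 from the examples above):

SQL> select grantee, table_name, privilege, grantable
     from dba_tab_privs
     where grantee = 'USER1' and table_name = 'V_$SESSION';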

-- Grant SELECT on Dictionary:
SQL> grant SELECT_CATALOG_ROLE to user1;

-- Grant EXECUTE on Dictionary:
SQL> grant EXECUTE_CATALOG_ROLE to user1;

-- Create Password Verify Function:
Note: As you cannot create objects under SYS in RDS, you have to use the following ready-made procedure provided by AWS to create the verify function.
Note: The verify function name should contain one of these keywords: "PASSWORD", "VERIFY", "COMPLEXITY", "ENFORCE", or "STRENGTH".

SQL> begin
    rdsadmin.rdsadmin_password_verify.create_verify_function(
        p_verify_function_name    => 'CUSTOM_PASSWORD_VFY_FUNCTION',
        p_min_length              => 8,
        p_max_length              => 256,
        p_min_letters             => 1,
        p_min_lowercase           => 1,
        p_min_uppercase           => 1,
        p_min_digits              => 3,
        p_min_special             => 2,
        p_disallow_simple_strings => true,
        p_disallow_whitespace     => true,
        p_disallow_username       => true,
        p_disallow_reverse        => true,
        p_disallow_db_name        => true,
        p_disallow_at_sign        => false);
end;
/
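
Once created, the function is not enforced until you attach it to a profile; a minimal example, assuming you want it on the DEFAULT profile (ALTER PROFILE is permitted for the RDS master user):

SQL> ALTER PROFILE DEFAULT LIMIT PASSWORD_VERIFY_FUNCTION CUSTOM_PASSWORD_VFY_FUNCTION;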



Directory/S3 Management: [Files/Upload/Download]

-- Show all files under DATA_PUMP_DIR directory:
SQL> SELECT * from table(RDSADMIN.RDS_FILE_UTIL.LISTDIR('DATA_PUMP_DIR')) order by mtime;
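
LISTDIR is a pipelined function returning FILENAME, TYPE, FILESIZE and MTIME columns, so you can filter it like any table; for example, to list only dump files with their sizes in MB:

SQL> SELECT filename, round(filesize/1024/1024) size_mb, mtime
     FROM table(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
     WHERE filename like '%.dmp' order by mtime;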

-- Read a logfile under BDUMP directory:
SQL> SELECT text FROM table(rdsadmin.rds_file_util.read_text_file('BDUMP','dbtask-1580377405757-6339.log'));

-- Read an import log under DATA_PUMP_DIR directory:
SQL> SELECT text FROM table(rdsadmin.rds_file_util.read_text_file('DATA_PUMP_DIR','import_schema.LOG'));     

-- Create a new Directory: [i.e. BKP_DIR]
SQL> EXEC rdsadmin.rdsadmin_util.create_directory(p_directory_name => 'BKP_DIR');

-- Delete a Directory:
Note: Dropping a directory will not delete the underlying files, and they will keep consuming space. So what shall you do?

First, list all the files under the directory to be deleted and delete them one by one: [the Die Hard method]

SQL> SELECT * from table(RDSADMIN.RDS_FILE_UTIL.LISTDIR('BKP_DIR')) order by mtime;
SQL> EXEC SYS.UTL_FILE.FREMOVE ('BKP_DIR','import.LOG');

Then, delete the Directory:
SQL> EXEC rdsadmin.rdsadmin_util.drop_directory(p_directory_name => 'BKP_DIR');

-- Upload a file to S3 bucket:
SQL> SELECT rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
    p_bucket_name => '<bucket_name>',    --bucket name where you want to upload to
    p_prefix => '<file_name>',        --File name you want to upload
    prefix => '',
    p_directory_name => 'DATA_PUMP_DIR')    --Directory Name you want to upload from
     AS TASK_ID FROM DUAL;
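
The call returns a TASK_ID that you'll need for monitoring; a small sketch that captures it in PL/SQL and prints the task log name to read from BDUMP (bucket and file names are placeholders):

SQL> set serveroutput on
SQL> DECLARE
       v_task_id VARCHAR2(1000);
     BEGIN
       v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
           p_bucket_name    => 'my-bucket',            -- placeholder
           p_prefix         => 'export_schema.dmp',    -- placeholder
           p_s3_prefix      => '',
           p_directory_name => 'DATA_PUMP_DIR');
       dbms_output.put_line('Monitor with log file: dbtask-'||v_task_id||'.log');
     END;
     /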



-- Download all files in an S3 bucket:
SQL> SELECT rdsadmin.rdsadmin_s3_tasks.download_from_s3(
      p_bucket_name    => 'my-bucket',       -- bucket name you want to download from
      p_directory_name => 'DATA_PUMP_DIR')   -- directory name you want to download to
   AS TASK_ID FROM DUAL;


-- Download all files inside a named folder in an S3 bucket:
SQL> SELECT rdsadmin.rdsadmin_s3_tasks.download_from_s3(
      p_bucket_name    => 'my-bucket',        -- bucket name you want to download from
      p_s3_prefix      => 'export_files/',    -- all files under this folder will be downloaded [don't forget the slash / after the folder name]
      p_directory_name => 'DATA_PUMP_DIR')    -- directory name you want to download to
   AS TASK_ID FROM DUAL;

-- Download one named file under a folder in an S3 bucket:
SQL> SELECT rdsadmin.rdsadmin_s3_tasks.download_from_s3(
      p_bucket_name    => 'my-bucket',                 -- bucket name you want to download from
      p_s3_prefix      => 'prod_db/export_files',      -- folder name full path
      p_prefix         => 'EXPORT_STG_04-03-19.dmp',   -- file name
      p_directory_name => 'DATA_PUMP_DIR')             -- directory name you want to download to
    AS TASK_ID FROM DUAL;


-- Check the upload/download progress: [using the TASK_ID returned by the above commands, e.g. 1580377405757-6339]
SQL> SELECT text FROM table(rdsadmin.rds_file_util.read_text_file('BDUMP','dbtask-1580377405757-6339.log'));
 


-- Delete a file from DATA_PUMP_DIR directory: [Note: FREMOVE expects a literal file name; a %U wildcard in a dump file name (export_RA01.dmp, export_RA02.dmp, etc...) will NOT be expanded, so remove each piece by name, or use the loop sketched below]
SQL> EXEC utl_file.fremove('DATA_PUMP_DIR','export_RA01.dmp');
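
To remove a whole set of dump file pieces in one go, you can combine LISTDIR with FREMOVE; a minimal sketch (the file pattern is an example):

SQL> BEGIN
       FOR f IN (SELECT filename
                 FROM table(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
                 WHERE filename LIKE 'export_RA%.dmp')
       LOOP
         utl_file.fremove('DATA_PUMP_DIR', f.filename);
       END LOOP;
     END;
     /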

-- Rename a file under DATA_PUMP_DIR directory:
SQL> EXEC UTL_FILE.FRENAME('DATA_PUMP_DIR', '<Original_filename>', 'DATA_PUMP_DIR', '<New_filename>', TRUE);

  
Database Management:

-- Enable Force Logging:
SQL> EXEC rdsadmin.rdsadmin_util.force_logging(p_enable => true );

-- Disable Force Logging:
SQL> EXEC rdsadmin.rdsadmin_util.force_logging(p_enable => false);

-- Flush Shared Pool:
SQL> EXEC rdsadmin.rdsadmin_util.flush_shared_pool;

-- Flush Buffer Cache:
SQL> EXEC rdsadmin.rdsadmin_util.flush_buffer_cache;

-- Force a Checkpoint:
SQL> EXEC rdsadmin.rdsadmin_util.checkpoint;

-- Switch REDOLOG: [remember ALTER SYSTEM is restricted]
SQL> EXEC rdsadmin.rdsadmin_util.switch_logfile;

-- View REDOLOG switches per hour: [Last 24hours]
SQL> SELECT to_char(first_time,'YYYY-MON-DD') day,
to_char(sum(decode(to_char(first_time,'HH24'),'00',1,0)),'9999') "00",
to_char(sum(decode(to_char(first_time,'HH24'),'01',1,0)),'9999') "01",
to_char(sum(decode(to_char(first_time,'HH24'),'02',1,0)),'9999') "02",
to_char(sum(decode(to_char(first_time,'HH24'),'03',1,0)),'9999') "03",
to_char(sum(decode(to_char(first_time,'HH24'),'04',1,0)),'9999') "04",
to_char(sum(decode(to_char(first_time,'HH24'),'05',1,0)),'9999') "05",
to_char(sum(decode(to_char(first_time,'HH24'),'06',1,0)),'9999') "06",
to_char(sum(decode(to_char(first_time,'HH24'),'07',1,0)),'9999') "07",
to_char(sum(decode(to_char(first_time,'HH24'),'08',1,0)),'9999') "08",
to_char(sum(decode(to_char(first_time,'HH24'),'09',1,0)),'9999') "09",
to_char(sum(decode(to_char(first_time,'HH24'),'10',1,0)),'9999') "10",
to_char(sum(decode(to_char(first_time,'HH24'),'11',1,0)),'9999') "11",
to_char(sum(decode(to_char(first_time,'HH24'),'12',1,0)),'9999') "12",
to_char(sum(decode(to_char(first_time,'HH24'),'13',1,0)),'9999') "13",
to_char(sum(decode(to_char(first_time,'HH24'),'14',1,0)),'9999') "14",
to_char(sum(decode(to_char(first_time,'HH24'),'15',1,0)),'9999') "15",
to_char(sum(decode(to_char(first_time,'HH24'),'16',1,0)),'9999') "16",
to_char(sum(decode(to_char(first_time,'HH24'),'17',1,0)),'9999') "17",
to_char(sum(decode(to_char(first_time,'HH24'),'18',1,0)),'9999') "18",
to_char(sum(decode(to_char(first_time,'HH24'),'19',1,0)),'9999') "19",
to_char(sum(decode(to_char(first_time,'HH24'),'20',1,0)),'9999') "20",
to_char(sum(decode(to_char(first_time,'HH24'),'21',1,0)),'9999') "21",
to_char(sum(decode(to_char(first_time,'HH24'),'22',1,0)),'9999') "22",
to_char(sum(decode(to_char(first_time,'HH24'),'23',1,0)),'9999') "23"
from v$log_history where first_time > sysdate-1
GROUP by to_char(first_time,'YYYY-MON-DD') order by 1 asc;


-- Add a new REDO LOG Group: [containing one redolog file of 1G size (the default is 128M)]
SQL> EXEC rdsadmin.rdsadmin_util.add_logfile(p_size => '1G');

-- Drop REDO LOG Group: [REDOLOG Group# 1]
    -- Force a Checkpoint:
        SQL> EXEC rdsadmin.rdsadmin_util.checkpoint;
    -- Switch REDOLOG: [ALTER SYSTEM is restricted]
        SQL> EXEC rdsadmin.rdsadmin_util.switch_logfile;

SQL> EXEC rdsadmin.rdsadmin_util.drop_logfile(1);
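
Note: a redo group can only be dropped while it is INACTIVE; you can verify the group status first with this native query:

SQL> select group#, bytes/1024/1024 size_mb, status from v$log;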

-- Set DEFAULT TABLESPACE for database:
SQL> EXEC rdsadmin.rdsadmin_util.alter_default_tablespace(tablespace_name => 'example');

-- Set DEFAULT TEMPORARY TABLESPACE for database:
SQL> EXEC rdsadmin.rdsadmin_util.alter_default_temp_tablespace(tablespace_name => 'temp2');

-- Resize TEMPORARY tablespace in a [Read Replica]:
SQL> EXEC rdsadmin.rdsadmin_util.resize_temp_tablespace('TEMP','4G');

-- Checking the Current retention for ARCHIVELOGS and TRACE Files:
SQL> set serveroutput on
     EXEC rdsadmin.rdsadmin_util.show_configuration;  
            

-- Increase the archivelog retention (kept on disk) to 1 day [24 hours]: [Default is 0]
SQL> EXEC rdsadmin.rdsadmin_util.set_configuration(name  => 'archivelog retention hours', value => '24');
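
The retained archivelogs consume local instance storage; you can estimate how much with a native query like this (summing the non-deleted archived logs):

SQL> select round(sum(blocks*block_size)/1024/1024) archived_mb
     from v$archived_log where deleted = 'NO';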

-- Add supplemental log: [For goldengate or any replication tool]
SQL> EXEC rdsadmin.rdsadmin_util.alter_supplemental_logging('ADD','ALL');

-- Change DB timezone:
SQL> EXEC rdsadmin.rdsadmin_util.alter_db_time_zone(p_new_tz => 'Asia/Dubai');

-- Collect an AWR report: [Enterprise Edition only; make sure you have acquired the Diagnostics+Tuning pack licenses]
Copy the script $ORACLE_HOME/rdbms/admin/awrrpt.sql from any database server you have [on premises or EC2] and run it against the RDS database.
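
awrrpt.sql will prompt you for the begin/end snapshot IDs; you can list the available snapshots first with this native query:

SQL> select snap_id, begin_interval_time, end_interval_time
     from dba_hist_snapshot order by snap_id;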

-- Change the default Application Edition:
SQL> EXEC rdsadmin.rdsadmin_util.alter_default_edition('APP_JAN2020');

-- Reset back to the default Application Edition:
SQL> EXEC rdsadmin.rdsadmin_util.alter_default_edition('ORA$BASE');


-- Gather Statistics:

--Gather Full DB Statistics:
SQL> BEGIN
     DBMS_STATS.GATHER_DATABASE_STATS(
     ESTIMATE_PERCENT => DBMS_STATS.AUTO_SAMPLE_SIZE,
     --METHOD_OPT     => 'FOR ALL COLUMNS SIZE SKEWONLY',
     CASCADE          => TRUE,
     DEGREE           => DBMS_STATS.AUTO_DEGREE,
     GATHER_SYS       => TRUE,
     OPTIONS          => 'GATHER STALE');
END;
/


--Gather Full Schema Statistics:
SQL> BEGIN
     DBMS_STATS.GATHER_SCHEMA_STATS (
     ownname          => 'SCOTT',
     estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE,
     degree           => DBMS_STATS.AUTO_DEGREE,
     cascade          => TRUE
     --,OPTIONS       => 'GATHER STALE'
     );
END;
/
 

--Gather one Table Statistics: (+Its indexes)
SQL> BEGIN
     DBMS_STATS.GATHER_TABLE_STATS (
     ownname          => 'SCOTT',
     tabname          => 'EMP',
     degree           => DBMS_STATS.AUTO_DEGREE,
     cascade          => TRUE,
     method_opt       => 'FOR ALL COLUMNS SIZE AUTO',
     estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE);
END;
/

                   
-- Re-Compile invalid objects:

--Check for invalid objects:
SQL> select object_name,object_type,status from dba_objects where status<>'VALID';

--Compile invalid objects on the full DB: (4 is the degree of parallelism; you can change it based on the system's CPU power)
SQL> EXEC SYS.UTL_RECOMP.recomp_parallel(4);

    Or compile all DB objects serially: (No parallelism; it resolves object dependencies more reliably than parallel mode, but it's slower)
    SQL> EXECUTE SYS.UTL_RECOMP.RECOMP_SERIAL();

--Compile invalid objects on a full schema: (4 is the degree of parallelism; you can change it based on the system's CPU power)
SQL> EXEC SYS.UTL_RECOMP.recomp_parallel(4, 'SCOTT');


Storage Management:

-- Tablespace Size:
SQL> select tablespace_name,
       round((tablespace_size*8192)/(1024*1024)) Total_MB,
       round((used_space*8192)/(1024*1024))      Used_MB,
       round(used_percent,2)              "%Used"
from dba_tablespace_usage_metrics;


-- Datafiles:
SQL> select tablespace_name,file_name,bytes/1024/1024 "Size",maxbytes/1024/1024 "Max_Size" from dba_data_files
union select tablespace_name,file_name,bytes/1024/1024 "Size",maxbytes/1024/1024 "Max_Size" from dba_temp_files order by 1;



Objects Management:

-- Object Size:
SQL> col SEGMENT_NAME format a30
SELECT /*+RULE*/ SEGMENT_NAME, TABLESPACE_NAME, SEGMENT_TYPE OBJECT_TYPE, ROUND(SUM(BYTES/1024/1024)) OBJECT_SIZE_MB
FROM   SYS.DBA_SEGMENTS
WHERE  OWNER =        upper('SPRINT_STAGE1')
AND    SEGMENT_NAME = upper('E_INTERLINE_MSGS20')
GROUP  BY SEGMENT_NAME, TABLESPACE_NAME, SEGMENT_TYPE;
--Lob Size:
SELECT /*+RULE*/ SEGMENT_NAME LOB_NAME, TABLESPACE_NAME, ROUND(SUM(BYTES/1024/1024)) OBJECT_SIZE_MB
FROM   SYS.DBA_SEGMENTS
WHERE  SEGMENT_NAME in (select /*+RULE*/ SEGMENT_NAME from dba_lobs where
owner=     upper('SPRINT_STAGE1') and
table_name=UPPER('E_INTERLINE_MSGS20'))
GROUP  BY SEGMENT_NAME, TABLESPACE_NAME;
--Indexes:
SELECT /*+RULE*/ SEGMENT_NAME INDEX_NAME, TABLESPACE_NAME, ROUND(SUM(BYTES/1024/1024)) OBJECT_SIZE_MB
FROM   SYS.DBA_SEGMENTS
WHERE  OWNER = upper('SPRINT_STAGE1')
AND    SEGMENT_NAME in (select index_name from dba_indexes where
owner=     upper('SPRINT_STAGE1') and
table_name=UPPER('E_INTERLINE_MSGS20'))
GROUP  BY SEGMENT_NAME, TABLESPACE_NAME;


-- Table Info:
SQL> set linesize 190
col "OWNER.TABLE"     for a35
col tablespace_name     for a25
col PCT_FREE        for 99999999
col PCT_USED        for 99999999
col "STATS_LOCKED|STALE|DATE"    for a23
col "READONLY" for a8
select t.owner||'.'||t.table_name "OWNER.TABLE",t.TABLESPACE_NAME,t.PCT_FREE
,t.PCT_USED,d.extents,t.MAX_EXTENTS,t.COMPRESSION,t.STATUS,o.created,s.stattype_locked||'|'||s.stale_stats||'|'||s.LAST_ANALYZED "STATS_LOCKED|STALE|DATE"
from dba_tables t, dba_objects o, dba_segments d, dba_tab_statistics s
where t.owner=     upper('SPRINT_STAGE1')
and t.table_name = upper('E_INTERLINE_MSGS20')
and o.owner=t.owner and o.object_name=t.table_name and o.owner=d.owner and t.table_name=d.SEGMENT_NAME and o.owner=s.owner and t.table_name=s.table_name;


-- Getting Table Size [TABLE + ITS LOBS + ITS INDEXES]...
SQL> col Name for a30
col Type for a30
set heading on echo off
COLUMN TABLE_NAME FORMAT A32
COLUMN OBJECT_NAME FORMAT A32
COLUMN OWNER FORMAT A30
SELECT owner, table_name, TRUNC(sum(bytes)/1024/1024) TOTAL_SIZE_MB FROM
(SELECT segment_name table_name, owner, bytes FROM dba_segments WHERE segment_type = 'TABLE'
 UNION ALL
 SELECT i.table_name, i.owner, s.bytes FROM dba_indexes i, dba_segments s WHERE s.segment_name = i.index_name AND   s.owner = i.owner AND s.segment_type = 'INDEX'
 UNION ALL
 SELECT l.table_name, l.owner, s.bytes FROM dba_lobs l, dba_segments s WHERE s.segment_name = l.segment_name AND s.owner = l.owner AND s.segment_type = 'LOBSEGMENT'
 UNION ALL
 SELECT l.table_name, l.owner, s.bytes FROM dba_lobs l, dba_segments s WHERE s.segment_name = l.index_name AND s.owner = l.owner AND s.segment_type = 'LOBINDEX')
 WHERE owner=       UPPER('SPRINT_STAGE1')
 and   table_name = UPPER('E_INTERLINE_MSGS20')
 GROUP BY table_name, owner
 ORDER BY SUM(bytes) desc;


-- INDEXES On the Table:
SQL> set pages 100 feedback on
set heading on
COLUMN OWNER FORMAT A25 heading "Index Owner"
COLUMN INDEX_NAME FORMAT A35 heading "Index Name"
COLUMN COLUMN_NAME FORMAT A30 heading "On Column"
COLUMN COLUMN_POSITION FORMAT 9999 heading "Pos"
COLUMN "INDEX" FORMAT A40
COLUMN TABLESPACE_NAME FOR A25
COLUMN INDEX_TYPE FOR A15
SELECT IND.OWNER||'.'||IND.INDEX_NAME "INDEX", IND.INDEX_TYPE, COL.COLUMN_NAME, COL.COLUMN_POSITION,IND.TABLESPACE_NAME,IND.STATUS,IND.UNIQUENESS,
       S.stattype_locked||'|'||S.stale_stats||'|'||S.LAST_ANALYZED "STATS_LOCKED|STALE|DATE"
FROM   SYS.DBA_INDEXES IND, SYS.DBA_IND_COLUMNS COL, SYS.DBA_IND_STATISTICS S
WHERE  IND.TABLE_NAME =  upper('OA_FREQUENT_FLYERS')
AND    IND.TABLE_OWNER = upper('SPRINT_STAGE1')
AND    IND.TABLE_NAME = COL.TABLE_NAME AND IND.OWNER = COL.INDEX_OWNER AND IND.TABLE_OWNER = COL.TABLE_OWNER AND IND.INDEX_NAME = COL.INDEX_NAME
AND    IND.OWNER = S.OWNER AND IND.INDEX_NAME = S.INDEX_NAME;


-- CONSTRAINTS On a Table:
SQL> col type format a10
col constraint_name format a40
COL COLUMN_NAME FORMAT A25 heading "On Column"
select    decode(d.constraint_type,'C', 'Check','O', 'R/O View','P', 'Primary','R', 'Foreign','U', 'Unique','V', 'Check view') type
,d.constraint_name, c.COLUMN_NAME, d.status,d.last_change
from    dba_constraints d, dba_cons_columns c
where    d.owner =      upper('SPRINT_STAGE1')
and    d.table_name = upper('E_INTERLINE_MSGS20')
and    d.OWNER=c.OWNER and d.CONSTRAINT_NAME=c.CONSTRAINT_NAME
order by 1;



Block Corruption on RDS: [Skipping Corrupted Blocks procedure]

In case you have corrupted blocks on a table/index, any query that tries to access those blocks will keep failing with ORA-01578.

From the error message, find the corrupted object's name and owner:
SQL> Select relative_fno,owner,segment_name,segment_type
from dba_extents
where
file_id = <DATAFILE_NUMBER_IN_THE_ERROR_MESSAGE_HERE>
and
<CORRUPTED_BLOCK_NUMBER_IN_THE_ERROR_MESSAGE_HERE> between block_id and block_id + blocks - 1;
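
For example, for a hypothetical error like "ORA-01578: ORACLE data block corrupted (file # 7, block # 12698)", the query becomes:

SQL> select relative_fno, owner, segment_name, segment_type
     from dba_extents
     where file_id = 7
     and 12698 between block_id and block_id + blocks - 1;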


Next, follow these steps to SKIP the corrupted blocks on the underlying object, allowing queries running against it to succeed:

-- Create REPAIR tables:
exec rdsadmin.rdsadmin_dbms_repair.create_repair_table;
exec rdsadmin.rdsadmin_dbms_repair.create_orphan_keys_table;                       
exec rdsadmin.rdsadmin_dbms_repair.purge_repair_table;
exec rdsadmin.rdsadmin_dbms_repair.purge_orphan_keys_table;


-- Check Corrupted blocks on the corrupted object and populate them in the REPAIR tables:
set serveroutput on
declare v_num_corrupt int;
begin
  v_num_corrupt := 0;
  rdsadmin.rdsadmin_dbms_repair.check_object (
    schema_name => '&corrupted_Object_Owner',
    object_name => '&corrupted_object_name',
    corrupt_count =>  v_num_corrupt
  );
dbms_output.put_line('number corrupt: '||to_char(v_num_corrupt));
end;
/


col corrupt_description format a30
col repair_description format a30
select object_name, block_id, corrupt_type, marked_corrupt, corrupt_description, repair_description from sys.repair_table;


select skip_corrupt from dba_tables where owner = upper('&corrupted_Object_Owner') and table_name = upper('&corrupted_object_name');                  
                                       

-- Enable the CORRUPTION SKIPPING on the corrupted object:
begin
  rdsadmin.rdsadmin_dbms_repair.skip_corrupt_blocks (
    schema_name => '&corrupted_Object_Owner',
    object_name => '&corrupted_object_name',
    object_type => rdsadmin.rdsadmin_dbms_repair.table_object,
    flags => rdsadmin.rdsadmin_dbms_repair.skip_flag);
end;
/


select skip_corrupt from dba_tables where owner =  upper('&corrupted_Object_Owner') and table_name = upper('&corrupted_object_name');        

              
-- Disable the CORRUPTION SKIPPING on the corrupted object:

begin
  rdsadmin.rdsadmin_dbms_repair.skip_corrupt_blocks (
    schema_name => '&corrupted_Object_Owner',
    object_name => '&corrupted_object_name',
    object_type => rdsadmin.rdsadmin_dbms_repair.table_object,
    flags => rdsadmin.rdsadmin_dbms_repair.noskip_flag);
end;
/


select skip_corrupt from dba_tables where owner =  upper('&corrupted_Object_Owner') and table_name = upper('&corrupted_object_name');                     

-- Finally DROP the repair tables:
exec rdsadmin.rdsadmin_dbms_repair.drop_repair_table;
exec rdsadmin.rdsadmin_dbms_repair.drop_orphan_keys_table;                  
                             


Auditing:
-- Enable Auditing for all the privileges on SYS.AUD$ table:
SQL> EXEC rdsadmin.rdsadmin_master_util.audit_all_sys_aud_table;
SQL> EXEC rdsadmin.rdsadmin_master_util.audit_all_sys_aud_table(p_by_access => true); 
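
To confirm the statement auditing options now set on SYS.AUD$, a quick dictionary check:

SQL> select * from dba_obj_audit_opts where owner = 'SYS' and object_name = 'AUD$';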

-- Disable Auditing on SYS.AUD$ table:
SQL> EXEC rdsadmin.rdsadmin_master_util.noaudit_all_sys_aud_table; 


RMAN Tasks:

-- Full RMAN database Backup in RDS:
SQL> BEGIN
    rdsadmin.rdsadmin_rman_util.backup_database_full(
        p_owner               => 'SYS',
        p_directory_name      => 'BKP_DIR',
        p_level               => 0,    -- 0 for FULL, 1 for Incremental
      --p_parallel            => 4,    -- keep hashed (commented) when using Standard Edition
        p_section_size_mb     => 10,
        p_rman_to_dbms_output => TRUE);
END;
/
 

-- Backup ALL ARCHIVELOGS: 
SQL> BEGIN
    rdsadmin.rdsadmin_rman_util.backup_archivelog_all(
        p_owner               => 'SYS',
        p_directory_name      => 'BKP_DIR',
      --p_parallel            => 6,    -- keep hashed (commented) when using Standard Edition
        p_rman_to_dbms_output => TRUE);
END;
/


-- Backup ARCHIVELOGS between a date range:
SQL> BEGIN
    rdsadmin.rdsadmin_rman_util.backup_archivelog_date(
        p_owner               => 'SYS',
        p_directory_name      => 'BKP_DIR',
        p_from_date           => '01/15/2020 00:00:00',
        p_to_date             => '01/16/2020 00:00:00',
      --p_parallel            => 4,    -- keep hashed (commented) when using Standard Edition
        p_rman_to_dbms_output => TRUE);
END;
/

           
Note: To use SCN or log sequence ranges instead of dates, replace "p_from_date" with "p_from_scn" or "p_from_sequence", and "p_to_date" with "p_to_scn" or "p_to_sequence".

-- Show running RMAN backups: [Manual backups run by you, not by RDS; RDS itself backs up the DB using the hot backup method]
 SQL> SELECT to_char (start_time,'DD-MON-YY HH24:MI') START_TIME, to_char(end_time,'DD-MON-YY HH24:MI') END_TIME, time_taken_display, status,
input_type, output_device_type,input_bytes_display, output_bytes_display, output_bytes_per_sec_display,COMPRESSION_RATIO COMPRESS_RATIO
FROM v$rman_backup_job_details
WHERE status like 'RUNNING%';


-- Show current Running Hot Backups:
SQL> SELECT t.name AS "TB_NAME", d.file# as "DF#", d.name AS "DF_NAME", b.status
FROM V$DATAFILE d, V$TABLESPACE t, V$BACKUP b
WHERE d.TS#=t.TS#
AND b.FILE#=d.FILE#
AND b.STATUS='ACTIVE';


-- Validate the database for Physical/Logical corruption on RDS: 
SQL> BEGIN
    rdsadmin.rdsadmin_rman_util.validate_database(
        p_validation_type     => 'PHYSICAL+LOGICAL',
      --p_parallel            => 2,    -- keep hashed (commented) when using Standard Edition
        p_section_size_mb     => 10,
        p_rman_to_dbms_output => TRUE);
END;
/


-- Enable BLOCK CHANGE TRACKING on RDS: [Avoid enabling it on 11g, as you may hit a bug that will prevent you from restoring the database consistently later]
SQL> SELECT status, filename FROM V$BLOCK_CHANGE_TRACKING;
SQL> EXEC rdsadmin.rdsadmin_rman_util.enable_block_change_tracking;

-- Disable BLOCK CHANGE TRACKING on RDS:
SQL> EXEC rdsadmin.rdsadmin_rman_util.disable_block_change_tracking;

-- Crosscheck and delete expired ARCHIVELOGS: [those that no longer exist on disk]
 SQL> EXEC rdsadmin.rdsadmin_rman_util.crosscheck_archivelog(p_delete_expired => TRUE, p_rman_to_dbms_output => TRUE);


Oracle Scheduler Jobs Management:

-- List all jobs:
SQL> select OWNER||'.'||JOB_NAME "OWNER.JOB_NAME",ENABLED,STATE,FAILURE_COUNT,to_char(LAST_START_DATE,'DD-Mon-YYYY hh24:mi:ss TZR')LAST_RUN,to_char(NEXT_RUN_DATE,'DD-Mon-YYYY hh24:mi:ss TZR')NEXT_RUN,REPEAT_INTERVAL,
extract(day from last_run_duration) ||':'||
lpad(extract(hour from last_run_duration),2,'0')||':'||
lpad(extract(minute from last_run_duration),2,'0')||':'||
lpad(round(extract(second from last_run_duration)),2,'0') "DURATION(d:hh:mm:ss)"
from dba_scheduler_jobs order by ENABLED,STATE,"OWNER.JOB_NAME";


-- List AUTOTASK INTERNAL MAINTENANCE WINDOWS:
SQL> SELECT WINDOW_NAME,TO_CHAR(WINDOW_NEXT_TIME,'DD-MM-YYYY HH24:MI:SS') NEXT_RUN,AUTOTASK_STATUS STATUS,WINDOW_ACTIVE ACTIVE,OPTIMIZER_STATS,SEGMENT_ADVISOR,SQL_TUNE_ADVISOR FROM DBA_AUTOTASK_WINDOW_CLIENTS;

-- FAILED JOBS IN THE LAST 24H:
SQL> select JOB_NAME,OWNER,LOG_DATE,STATUS,ERROR#,RUN_DURATION from DBA_SCHEDULER_JOB_RUN_DETAILS where STATUS='FAILED' and LOG_DATE > sysdate-1 order by JOB_NAME,LOG_DATE;

-- Current running jobs:
SQL> select j.RUNNING_INSTANCE INS,j.OWNER||'.'||j.JOB_NAME ||' | '||SLAVE_OS_PROCESS_ID||'|'||j.SESSION_ID"OWNER.JOB_NAME|OSPID|SID"
,s.FINAL_BLOCKING_SESSION "BLKD_BY",ELAPSED_TIME,CPU_USED
,substr(s.SECONDS_IN_WAIT||'|'||s.WAIT_CLASS||'|'||s.EVENT,1,45) "WAITED|WCLASS|EVENT",S.SQL_ID
from dba_scheduler_running_jobs j, gv$session s
where   j.RUNNING_INSTANCE=S.INST_ID(+)
and     j.SESSION_ID=S.SID(+)
order by "OWNER.JOB_NAME|OSPID|SID",ELAPSED_TIME;


-- Disable a job owned by SYS: [Only available on 11.2.0.4.v21 & 12.2.0.2.ru-2019-07.rur-2019-07.r1 and higher]
 SQL> EXEC rdsadmin.rdsadmin_dbms_scheduler.disable('SYS.CLEANUP_NON_EXIST_OBJ');

-- Modify the repeat interval of a job: [Only available on 11.2.0.4.v21 & 12.2.0.2.ru-2019-07.rur-2019-07.r1 and higher] 
SQL> BEGIN
     rdsadmin.rdsadmin_dbms_scheduler.set_attribute(
          name      => 'SYS.CLEANUP_NON_EXIST_OBJ',
          attribute => 'repeat_interval',
          value     => 'freq=daily;byday=FRI,SAT;byhour=20;byminute=0;bysecond=0');
END;
/


For more reading on Importing Data on RDS:
http://dba-tips.blogspot.com/2019/04/how-to-import-schema-on-amazon-rds.html

References:
https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.CommonDBATasks.Database.html#Appendix.Oracle.CommonDBATasks.TimeZoneSupport
https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.CommonDBATasks.Misc.html
https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.CommonDBATasks.Log.html
https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.CommonDBATasks.RMAN.html
https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.CommonDBATasks.Scheduler.html

https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.CommonDBATasks.System.html#Appendix.Oracle.CommonDBATasks.CustomPassword

Monday, February 17, 2020

DBA BUNDLE V5.6

New features in Bundle 5.6

export_data.sh script: The data export script has been completely re-coded and comes with the following new features:

- Execution of the export runs in the background in "nohup" mode.
- Ability to exclude multiple schemas and tables from Full Database Export mode.
- You can provide multiple schemas to export in the Schema Export mode.
- Ability to exclude multiple tables from Schema Export mode.

- You can export multiple tables in Table Export mode.
- Ability to COMPRESS the dump file on the fly for all LEGACY export modes.

  [This eliminates the need to compress the dump file after the export operation, which saves disk space]
- The Parallelism option will pop up only if the database edition supports parallelism [i.e. Enterprise Edition]

- Both Data Pump and Legacy export will use FLASHBACK SCN to export the data in a consistent state which is beneficial in cases such as
  replicating tables using Goldengate or Oracle Streams.
- At the end of the export operation, the user will receive an email notification [optional].

Bug fixes applied on the following scripts:
rebuild_table.sh script: for rebuilding tables online.
configuration_baseline.sh script: For gathering the baseline info for OS & DB.
dbdailychk.sh script: for generating Health Check report on the DB.

To download the DBA Bundle latest version:
https://www.dropbox.com/s/k96rl0f4g39ukih/DBA_BUNDLE5.tar?dl=0

For more reading on what this bundle for and how to use it:
http://dba-tips.blogspot.com/2014/02/oracle-database-administration-scripts.html

Wednesday, February 5, 2020

New Shell Script For Exporting Data on Oracle

Download link:
https://www.dropbox.com/s/hk0pfo2tanop35r/export_data.sh?dl=0

Introduction:
A few years back I shared a script for exporting data on an Oracle DB; it was lacking some features, like exporting multiple tables/schemas, excluding schemas from Full Export mode, or excluding tables from Schema Export mode.

The new script I'm sharing in this post is coming with the following new features:

- Execution of the export runs in the background in "nohup" mode.
- Ability to exclude multiple schemas and tables from Full Database Export mode.
- You can provide multiple schemas to export in the Schema Export mode.
- Ability to exclude multiple tables from Schema Export mode.

- You can export multiple tables in Table Export mode.
- Ability to COMPRESS the dump file on the fly for all LEGACY export modes.

  [This eliminates the need to compress the dump file after the export operation, which saves disk space]
- The Parallelism option will pop up only if the database edition supports parallelism [i.e. Enterprise Edition]

- Both Data Pump and Legacy export will use FLASHBACK SCN to export the data in a consistent state which is beneficial in cases such as
  replicating tables using Goldengate or Oracle Streams.
- At the end of the export operation, the user will receive an email notification [optional].

I kept the Legacy export utility (exp) as an option because it is beneficial over Data Pump in the following scenarios:

- Exporting data on 9i and older versions.
- Exporting data on a read-only database [i.e. a Standby database].
- Insufficient space to accommodate a regular (uncompressed) Data Pump dump file [due to licensing or a Standard Edition setup].

Outside of the above scenarios, it's always recommended to use Data Pump over Legacy Export to export the data.
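
Under the hood, the consistent export simply captures the database's current SCN and passes it to the export utility through the FLASHBACK_SCN parameter, so all tables are exported as of that single point in time. Conceptually:

SQL> select current_scn from v$database;

The returned value is then written to the export parameter file as FLASHBACK_SCN=<that value>, as you can see in the script body below.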

How it works:

First it will ask you against which database you want to run the script: [Enter the Database NUMBER]

Then, you will be asked to Enter your Email to receive a notification upon the completion of the Export job: [you can hit enter to skip this part if you don't want to receive an Email notification]

Enter the location where the dump file will be saved:

Select from the option list the Export mode you want to run: [Enter a number from 1 to 3]
Enter 1 for Database Full export.
Enter 2 for Schema export.
Enter 3 for Table export.

You will be prompted for the Export utility to use for this export job:
Enter 1 to select DataPump expdp, which gives you the luxury of excluding schemas/tables from the export; you can also run the export in Parallel (more on that next).
Enter 2 to select Legacy exp, which is compatible with very old releases (9i and older); it can also utilize on-the-fly compression of the export file, using the mknod OS tool for that purpose.

The script will check whether the Parallelism feature is enabled in the current DB edition; if it is, it will prompt you for the degree of parallelism to use during the export. If you are not interested in running the export in parallel, enter 1 to disable parallelism.
Note: If you use parallelism during the export, the final dump file will be split into multiple files based on the degree of parallelism you entered.

In case the Parallelism option is not enabled in the current edition, i.e. you are using a Standard Edition, you will not be prompted for this option; instead, a message will pop up indicating that Parallelism is disabled.

Next, you will be prompted to use Compression to compress the export dump file:
Warning: If you selected DataPump export expdp, make sure you have already purchased the Oracle Advanced Compression license before accepting this feature.
If you selected the Legacy export exp, don't worry about licensing, as the compression happens on the fly on the OS side; no extra licenses are required!

If you selected DataPump export, you will be prompted for the Exclude Schemas section, where you can exclude one or multiple schemas from the Full database export.
Enter the schemas you want to exclude, separating them by a comma <,>.

If you selected DataPump export, you will also be prompted for the Exclude Tables section, where you can exclude any tables from the Full database export. [Don't qualify the table with its owner; enter only the table_name]

The script will then create the exporter user that runs the export job, along with the pre & post scripts that will help you import this dump file later. [It won't import anything, get back to your seat and relax 😉]

Then it will show you the commands you can use to control the export job during its execution, i.e. to PAUSE/RESUME/KILL the export job:

The export job will run in the background in "nohup" mode, so you don't need to worry about keeping this SSH session open; you can close it at any time without impacting the export job.

Once the export job completes, it will show you the FLASHBACK SCN used during the export, along with the import guidelines you can use whenever you want to import the generated dump file later: [Again, the script will not import anything; it will just show you guidelines]

By the end of the Export job you will receive an Email notification as well.

You will experience the same flow if you select the other export modes (SCHEMA or TABLE).

Note: When you select the Legacy Export utility (exp), options like Exclude Schemas, Exclude Tables, and Parallel will not appear, as they are not available in the Legacy export utility. The Compression option, however, remains available because I'm using native OS compression (mknod+bzip2) inside the script.

On 31-Dec-2020, I've published a shell script to help you with importing data from a dump file: http://dba-tips.blogspot.com/2020/12/import-shell-script-for-importing-data.html

Hope you like the new script. Let me know if you want to add any feature or report a bug.

This script is part of the DBA BUNDLE, to read more about it please visit this link:
http://dba-tips.blogspot.ae/2014/02/oracle-database-administration-scripts.html

DISCLAIMER: THIS SCRIPT IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT WITHOUT ANY WARRANTY. IT IS PROVIDED "AS IS".

GitHub Version:
# ###################################################################################################################################################
# Ver: 3.4
# EXPORT DATABASE | SCHEMA | TABLE.
# To be run by ORACLE user
# # # #
# Author: Mahmmoud ADEL # # # # ###
# # # # # #
# Created: 03-02-2014
# Modified: 26-05-2014 Hashed METADATA export lines to clear the confusion.
# 21-08-2014 Added DEGREE OF PARALLELISM calculation.
# 22-01-2020 Passing the export DataPump parameters to a Par file.
# 22-01-2020 Convert the execution of the export in the background NOHUP mode.
# 23-01-2020 Added the option of providing multiple schemas to export.
# 23-01-2020 Added the option of providing multiple tables to export.
# 23-01-2020 Added the option of excluding specific schemas and tables from the Full DB export mode.
# 23-01-2020 Added the option of excluding specific tables from the SCHEMA mode export.
# 28-01-2020 Added the option of COMPRESSING the legacy dump file on the fly for all LEGACY export modes.
# 29-01-2020 Redesigned the parallelism option section to pop-up only when database edition support parallelism.
# 02-02-2020 Added Email Notification option.
# 17-08-2020 Fix a directory creation bug.
# 03-01-2020 Translation of user input . & ~
# 10-11-2021 Fixed the bug of script hung when entering a blank value for Parallelism degree.
# 30-05-2022 Adding CONTENT mode to allow the user export DDL or DATA ONLY.
# 30-05-2022 Adding the final Par file review before the start of the export job.
# 01-09-2022 Set a Warning message if the Exporter DB USER DBA_BUNDLEEXP7 is already exist and forcefully drop it if the user continue.
# ###################################################################################################################################################
# ###########
# Description:
# ###########
export SRV_NAME="`uname -n`"
echo
echo "=============================================="
echo "This script EXPORTS DATABASE | SCHEMA | TABLE."
echo "=============================================="
echo
sleep 1
# #######################################
# Excluded INSTANCES:
# #######################################
# Here you can mention the instances the script will IGNORE and will NOT run against:
# Use pipe "|" as a separator between each instance name.
# e.g. Excluding: -MGMTDB, ASM instances:
EXL_DB="\-MGMTDB|ASM|APX" #Excluded INSTANCES [Will not get reported offline].
# ###########################
# Listing Available Databases:
# ###########################
# Count Instance Numbers:
INS_COUNT=$( ps -ef|grep pmon|grep -v grep|egrep -v ${EXL_DB}|wc -l )
# Exit if No DBs are running:
if [[ $INS_COUNT -eq 0 ]]
then
echo "No Database is Running !"
echo
return
fi
# If there is ONLY one DB set it as default without prompt for selection:
if [[ $INS_COUNT -eq 1 ]]
then
export ORACLE_SID=$( ps -ef|grep pmon|grep -v grep|egrep -v ${EXL_DB}|awk '{print $NF}'|sed -e 's/ora_pmon_//g'|grep -v sed|grep -v "s///g" )
# If there is more than one DB ASK the user to select:
elif [[ $INS_COUNT -gt 1 ]]
then
echo
echo "Select the ORACLE_SID:[Enter the number]"
echo "---------------------"
select DB_ID in $( ps -ef|grep pmon|grep -v grep|egrep -v ${EXL_DB}|awk '{print $NF}'|sed -e 's/ora_pmon_//g'|grep -v sed|grep -v "s///g" )
do
integ='^[0-9]+$'
if ! [[ ${REPLY} =~ ${integ} ]] || [ ${REPLY} -gt ${INS_COUNT} ] || [ ${REPLY} -eq 0 ]
then
echo
echo "Error: Not a valid number!"
echo
echo "Enter a valid NUMBER from the displayed list !: i.e. Enter a number from [1 to ${INS_COUNT}]"
echo "----------------------------------------------"
else
export ORACLE_SID=$DB_ID
echo
printf "`echo "Selected Instance: ["` `echo -e "\033[33;5m${DB_ID}\033[0m"` `echo "]"`\n"
echo
break
fi
done
fi
# Exit if the user selected a Non Listed Number:
if [[ -z "${ORACLE_SID}" ]]
then
echo "You've Entered An INVALID ORACLE_SID"
exit
fi
# #########################
# Getting ORACLE_HOME
# #########################
ORA_USER=`ps -ef|grep ${ORACLE_SID}|grep pmon|grep -v grep|egrep -v ${EXL_DB}|grep -v "\-MGMTDB"|awk '{print $1}'|tail -1`
USR_ORA_HOME=`grep -i "^${ORA_USER}:" /etc/passwd| cut -f6 -d ':'|tail -1`
# SETTING ORATAB:
if [[ -f /etc/oratab ]]
then
ORATAB=/etc/oratab
export ORATAB
## If OS is Solaris:
elif [[ -f /var/opt/oracle/oratab ]]
then
ORATAB=/var/opt/oracle/oratab
export ORATAB
fi
# ATTEMPT1: Get ORACLE_HOME using pwdx command:
export PGREP=`which pgrep`
export PWDX=`which pwdx`
if [[ -x ${PGREP} ]] && [[ -x ${PWDX} ]]
then
PMON_PID=`pgrep -lf _pmon_${ORACLE_SID}|awk '{print $1}'`
export PMON_PID
ORACLE_HOME=`pwdx ${PMON_PID} 2>/dev/null|awk '{print $NF}'|sed -e 's/\/dbs//g'`
export ORACLE_HOME
fi
# ATTEMPT2: If ORACLE_HOME not found get it from oratab file:
if [[ ! -f ${ORACLE_HOME}/bin/sqlplus ]]
then
## If OS is Linux:
if [[ -f /etc/oratab ]]
then
ORATAB=/etc/oratab
ORACLE_HOME=`grep -v '^\#' ${ORATAB} | grep -v '^$'| grep -i "^${ORACLE_SID}:" | perl -lpe'$_ = reverse' | cut -f3 | perl -lpe'$_ = reverse' |cut -f2 -d':'`
export ORACLE_HOME
## If OS is Solaris:
elif [[ -f /var/opt/oracle/oratab ]]
then
ORATAB=/var/opt/oracle/oratab
ORACLE_HOME=`grep -v '^\#' ${ORATAB} | grep -v '^$'| grep -i "^${ORACLE_SID}:" | perl -lpe'$_ = reverse' | cut -f3 | perl -lpe'$_ = reverse' |cut -f2 -d':'`
export ORACLE_HOME
fi
fi
# ATTEMPT3: If ORACLE_HOME is in /etc/oratab, use dbhome command:
if [[ ! -f ${ORACLE_HOME}/bin/sqlplus ]]
then
ORACLE_HOME=`dbhome "${ORACLE_SID}"`
export ORACLE_HOME
fi
# ATTEMPT4: If ORACLE_HOME is still not found, search for the environment variable: [Less accurate]
if [[ ! -f ${ORACLE_HOME}/bin/sqlplus ]]
then
ORACLE_HOME=`env|grep -i ORACLE_HOME|sed -e 's/ORACLE_HOME=//g'`
export ORACLE_HOME
fi
# ATTEMPT5: If ORACLE_HOME is not found in the environment search user's profile: [Less accurate]
if [[ ! -f ${ORACLE_HOME}/bin/sqlplus ]]
then
ORACLE_HOME=`grep -h 'ORACLE_HOME=\/' ${USR_ORA_HOME}/.bash_profile ${USR_ORA_HOME}/.*profile | perl -lpe'$_ = reverse' |cut -f1 -d'=' | perl -lpe'$_ = reverse'|tail -1`
export ORACLE_HOME
fi
# ATTEMPT6: If ORACLE_HOME is still not found, search for orapipe: [Least accurate]
if [[ ! -f ${ORACLE_HOME}/bin/sqlplus ]]
then
if [[ -x /usr/bin/locate ]]
then
ORACLE_HOME=`locate -i orapipe|head -1|sed -e 's/\/bin\/orapipe//g'`
export ORACLE_HOME
fi
fi
# TERMINATE: If all above attempts failed to get ORACLE_HOME location, EXIT the script:
if [[ ! -f ${ORACLE_HOME}/bin/sqlplus ]]
then
echo "Please export ORACLE_HOME variable in your .bash_profile file under oracle user home directory in order to get this script to run properly"
echo "e.g."
echo "export ORACLE_HOME=/u01/app/oracle/product/11.2.0/db_1"
exit
fi
export LD_LIBRARY_PATH=${ORACLE_HOME}/lib
# ########################################
# Exit if the user is not the Oracle Owner:
# ########################################
CURR_USER=`whoami`
if [[ ${ORA_USER} != ${CURR_USER} ]]; then
echo ""
echo "You're Running This Script with User: \"${CURR_USER}\" !!!"
echo "Please Run This Script With The Right OS User: \"${ORA_USER}\""
echo "Script Terminated!"
exit
fi
# ########################
# Getting ORACLE_BASE:
# ########################
# Get ORACLE_BASE from user's profile if not set:
if [[ -z "${ORACLE_BASE}" ]]
then
ORACLE_BASE=`grep -h 'ORACLE_BASE=\/' ${USR_ORA_HOME}/.bash* ${USR_ORA_HOME}/.*profile | perl -lpe'$_ = reverse' |cut -f1 -d'=' | perl -lpe'$_ = reverse'|tail -1`
export ORACLE_BASE
fi
# #########################
# EXPORT Section:
# #########################
# VARIABLES:
# #########
# Date Stamp:
DUMPDATE=`date +%d-%b-%Y`
#PASSHALF=`echo $((RANDOM % 999+7000))`
PASSHALF=`date '+%s'`
# If expdp version is 10g don't use REUSE_DUMPFILES parameter in the script:
VERSION=`strings ${ORACLE_HOME}/bin/expdp|grep Release|awk '{print $3}'`
case ${VERSION} in
10g) REUSE_DUMP='';;
*) REUSE_DUMP='REUSE_DUMPFILES=Y';;
# *) REUSE_DUMP='REUSE_DUMPFILES=Y COMPRESSION=ALL';;
esac
# Capturing the CURRENT_SCN to use it for a consistent DATA PUMP export:
#CURRENT_SCN_RAW=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
#set pages 0 lines 1000 feedback off;
#col current_scn for 99999999999999999999999999999999999
#select current_scn from v\$database;
#EOF
#)
#CURRENT_SCN=`echo ${CURRENT_SCN_RAW}| awk '{print $NF}'`
# case ${CURRENT_SCN} in
# *[0-9]*) export EXPORTSCN="FLASHBACK_SCN=${CURRENT_SCN}";;
# *) export EXPORTSCN="";;
# esac
VAL33=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
set pages 0 feedback off;
SELECT STATUS FROM V\$INSTANCE;
EOF
)
VAL44=`echo ${VAL33}| awk '{print $NF}'`
case ${VAL44} in
"OPEN") echo ;;
*) echo;echo "ERROR: INSTANCE [${ORACLE_SID}] IS IN STATUS: ${VAL44} !"
echo; echo "PLEASE OPEN INSTANCE [${ORACLE_SID}] AND RE-RUN THIS SCRIPT.";echo; exit ;;
esac
USER_OBJECTS_COUNT_RAW=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
set pages 0 feedback off;
SELECT COUNT(*) FROM DBA_OBJECTS WHERE OWNER='DBA_BUNDLEEXP7';
EOF
)
USER_OBJECTS_COUNT=`echo ${USER_OBJECTS_COUNT_RAW}| awk '{print $NF}'`
if [ ${USER_OBJECTS_COUNT} -gt 0 ]
then
echo
printf "`echo "The Exporter User [DBA_BUNDLEEXP7] is already EXIST in the database and has [${USER_OBJECTS_COUNT}] objects and "` `echo -e "\033[33;5mwill be DROPPED\033[0m"` `echo " by this script."`\n"
echo
fi
# ############################################
# Checking if PARALLELISM option is available:
# ############################################
# Computing the default PARALLEL DEGREE based on CPU count:
case `uname` in
Linux ) export PARALLEL_DEGREE=`cat /proc/cpuinfo| grep processor|wc -l`;;
AIX ) export PARALLEL_DEGREE=`lsdev -C|grep Process|wc -l`;;
SunOS ) export PARALLEL_DEGREE=`kstat cpu_info|grep core_id|sort -u|wc -l`;;
HP-UX) export PARALLEL_DEGREE=`lsdev -C|grep Process|wc -l`;;
esac
if [[ ! -z "${PARALLEL_DEGREE##[0-9]*}" ]]
then
export PARALLEL_DEGREE=1
fi
CHK_PARALLELISM_OPTION_RAW=$(${ORACLE_HOME}/bin/sqlplus -S "/ as sysdba" << EOF
set pages 0 feedback off echo off;
SELECT count(*) from v\$option where parameter='Parallel execution' and value='TRUE';
exit;
EOF
)
export CHK_PARALLELISM_OPTION=`echo ${CHK_PARALLELISM_OPTION_RAW} | awk '{print $NF}'`
# ##############################
# Prompt for EMAIL Confirmation:
# ##############################
echo "Enter your EMAIL to receive a notification upon the completion of the Export job: [Leave it BLANK or Enter N to Skip the notification]"
echo "================================================================================="
while read EMAILANS
do
case ${EMAILANS} in
""|"N"|"n"|"NO"|"No"|"no")export EMAILANS=""; export SENDEMAIL=""; echo; break;;
*@*.*) export SENDEMAIL="mail -s \"\${JOBSTATUS} on Server ${SRV_NAME}\" \${EMAILID} < \${LOGFILE}"; echo; break;;
*)echo ""
echo -e "\033[32;5mThis doesn't sound like a valid Email? ${EMAILANS}\033[0m"
echo ""
echo "Please Enter your Email: [Leave it BLANK or Enter N to Skip this!]"
echo "------------------------"
echo "i.e. john.smith@xyzcompany.com"
echo "";;
esac
done
echo "Enter the FULL LOCATION PATH where the EXPORT FILE will be saved under: [e.g. /backup/export]"
echo "======================================================================"
while read LOC1
do
case ${LOC1} in
'') export LOC1=`pwd`; echo; echo "DIRECTORY TRANSLATED TO: ${LOC1}";;
'.') export LOC1=`pwd`; echo; echo "DIRECTORY TRANSLATED TO: ${LOC1}";;
'~') export LOC1=${HOME}; echo; echo "DIRECTORY TRANSLATED TO: ${LOC1}";;
esac
if [[ -d "${LOC1}" ]] && [[ -r "${LOC1}" ]] && [[ -w "${LOC1}" ]]
then
echo "Export File will be saved under: ${LOC1}"; break
else
echo; printf "`echo "Please make sure that oracle user has"` `echo -e "\033[33;5mREAD/WRITE\033[0m"` `echo "permissions on the provided directory."`\n"; echo; echo "Enter the complete PATH where the dump file will be saved: [e.g. /backup/export]"
echo "----------------------------------------------------------"
fi
done
# ##############################
# Prompt for EMAIL Confirmation:
# ##############################
echo
echo "Do you want to Enable FLASHBACK SCN? [Y|N] [AKA: Export the data in CONSISTENT mode | Default [Y]]"
echo "===================================="
while read FLASCN
do
case ${FLASCN} in
""|"Y"|"y"|"YES"|"Yes"|"yes")
# Capturing the CURRENT_SCN to use it for a consistent DATA PUMP export:
CURRENT_SCN_RAW=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
set pages 0 lines 1000 feedback off;
col current_scn for 99999999999999999999999999999999999
select current_scn from v\$database;
EOF
)
CURRENT_SCN=`echo ${CURRENT_SCN_RAW}| awk '{print $NF}'`
export EXPORTSCN="FLASHBACK_SCN=${CURRENT_SCN}"
echo
echo "Data will be exported at the current SCN: [${CURRENT_SCN}]"
echo
echo "[Note: If the exported data is too big; make sure the UNDO_RETENTION & UNDO tablespace are big enough.]"
echo
break ;;
*)
echo
echo "FLASHBACK_SCN is DISABLED."
echo
export EXPORTSCN=""
break ;;
esac
done
# ######################
# Export Options:
# ######################
echo
echo "Please Select the EXPORT MODE: [Enter a number]"
echo "=============================="
echo "1. EXPORT FULL DATABASE"
echo "2. EXPORT SCHEMAS"
echo "3. EXPORT TABLES"
echo "[Enter a number from 1 to 3]"
echo ""
while read ANS
do
case $ANS in
1|"EXPORT FULL DATABASE"|"database"|"DATABASE"|"full"|"FULL")echo;echo "Entering EXPORT FULL DATABASE MODE ...";sleep 1
# #######################
# EXPORT DATABASE SECTION:
# #######################
echo
echo "WHICH EXPORT UTILITY YOU WANT TO USE: [DEFAULT IS DATAPUMP EXPDP]"
echo "===================================="
echo "1) DATAPUMP [EXPDP] |Pros: Faster when import, Cloud/PARALLELISM compatible, can Exclude schema/tables |Cons: COMPRESSION requires license"
echo "2) LEGACY EXPORT [EXP] |Pros: COMPRESSION can happen on the fly without license |Cons: Slower when import, No Cloud/PARALLELISM compatibility"
while read EXP_TOOL
do
case $EXP_TOOL in
""|"1"|"DATAPUMP"|"datapump"|"DATAPUMP [EXPDP]"|"[EXPDP]"|"EXPDP"|"expdp")
# Prompt the user the PARALLELISM option only if it's available in the DB Edition:
export INT='^[0-9]+$'
if [[ ${CHK_PARALLELISM_OPTION} =~ ${INT} ]]
then
if [ ${CHK_PARALLELISM_OPTION} -eq 1 ]
then
echo
echo "Enter the PARALLEL DEGREE you want to perform the export with PARALLELISM? [If used, The final dump file will be divided into multiple files!]"
echo "========================================================================="
echo "[Current CPU Count on this Server is: ${PARALLEL_DEGREE}]"
echo "Enter a number bigger than 1 to utilize PARALLELISM or enter 0 to disable PARALLELISM"
echo ""
while read PARALLEL_ANS
do
# Check if the input is an integer:
if [[ -z ${PARALLEL_ANS} ]]; then
export PARALLEL_ANS=0
fi
if [[ ${PARALLEL_ANS} =~ ${INT} ]]
then
# Check if the input is greater than 1:
if [ "${PARALLEL_ANS}" -gt 1 ]
then
export PARALLEL="PARALLEL=${PARALLEL_ANS}"
export PARA="_%u"
echo -e "\033[32;5mPARALLELISM ENABLED | The final dump file will be divided into multiple files based on the degree of parallelism you used.\033[0m"
echo
else
echo "PARALLELISM DISABLED.";echo ""
fi
break
fi
done
else
echo;echo -e "\033[32;5mPARALLELISM option is not available in the current Database Edition.\033[0m"
fi
fi
# PARAMETER FILE CREATION:
export DUMPFILENAME="EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}${PARA}.dmp"
export LOGFILE="${LOC1}/EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}.log"
PARFILE=${LOC1}/EXPORT_FULL_DB_${ORACLE_SID}.par
echo "# FULL DATABASE EXPORT PARAMETER FILE CREATED BY export_data.sh SCRIPT on [${DUMPDATE}]: [${ORACLE_SID}]" > ${PARFILE}
echo "FULL=Y" >> ${PARFILE}
echo "DIRECTORY=EXPORT_FILES_DBA_BUNDLE" >> ${PARFILE}
echo "DUMPFILE=${DUMPFILENAME}" >> ${PARFILE}
echo "LOGFILE=EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}.log" >> ${PARFILE}
echo "${EXPORTSCN}" >> ${PARFILE}
echo "${REUSE_DUMP}" >> ${PARFILE}
echo "${PARALLEL}" >> ${PARFILE}
printf "`echo "Do you want to enable the COMPRESSION [Y|N] [N] [Do NOT answer with YES unless you already acquired the"` `echo -e "\033[33;5mAdvanced Compression License\033[0m"` `echo "]"`\n"
echo "====================================="
while read COMP_ANS
do
case $COMP_ANS in
y|Y|yes|YES|Yes) echo;echo "COMPRESSION=ALL" >> ${PARFILE};echo -e "\033[32;5mCompression Enabled.\033[0m";echo; break ;;
""|n|N|no|NO|No) echo; echo "COMPRESSION DISABLED."; echo; break ;;
*) echo;echo "Please Enter a Valid Answer [Y|N]"
echo "---------------------------------";;
esac
done
echo
echo "Enter the SCHEMAS you want to EXCLUDE from the export, separating them by comma:"
echo "==============================================================================="
echo "i.e. ANONYMOUS,APPQOSSYS,AUDSYS,BI,CTXSYS,DBSNMP,DIP,DMSYS,DVF,DVSYS,EXDSYS,EXFSYS,GSMADMIN_INTERNAL,GSMCATUSER,GSMUSER,LBACSYS,MDSYS,MGMT_VIEW,MDDATA,MTSSYS,ODM,ODM_MTR,OJVMSYS,OLAPSYS,ORACLE_OCM,ORDDATA,ORDPLUGINS,ORDSYS,OUTLN,SI_INFORMTN_SCHEMA,SPATIAL_CSW_ADMIN,SPATIAL_CSW_ADMIN_USR,SPATIAL_WFS_ADMIN,SPATIAL_WFS_ADMIN_USR,SYS,SYSBACKUP,SYSDG,SYSKM,SYSMAN,SYSTEM,TSMSYS,WKPROXY,WKSYS,WK_TEST,WMSYS,XDB,XTISYS,DSSYS,PERFSTAT,REPADMIN,OEM_ADVISOR,OEM_MONITOR,OLAP_DBA,OLAP_USER,OWBSYS,OWBSYS_AUDIT,APEX_030200"
echo "[Leave it BLANK and hit Enter if you do NOT want to exclude any SCHEMAS]"
echo ""
while read EXCLUDESCHEMAVAR
do
case ${EXCLUDESCHEMAVAR} in
"") echo; export EXCLUDESCHEMA=""; break ;;
*) echo; export EXCLUDESCHEMA="EXCLUDE=SCHEMA:\"IN('$(sed s/,/\',\'/g <<<${EXCLUDESCHEMAVAR}| tr '[:lower:]' '[:upper:]')')\""
echo ${EXCLUDESCHEMA} >> ${PARFILE}; break ;;
esac
done
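## e.g. an input of "scott,hr" above produces: EXCLUDE=SCHEMA:"IN('SCOTT','HR')"
## [sed wraps each name in single quotes and tr uppercases the whole list].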
echo "Enter the TABLES you want to EXCLUDE from the export, separating them by comma:"
echo "=============================================================================="
echo "i.e. EMP,DEPT"
echo "[Leave it BLANK and hit Enter if you do NOT want to exclude any TABLES]"
echo ""
while read EXCLUDETABLEVAR
do
case ${EXCLUDETABLEVAR} in
"") echo; export EXCLUDETABLE=""; break ;;
*) echo; export EXCLUDETABLE="EXCLUDE=TABLE:\"IN('$(sed s/,/\',\'/g <<<${EXCLUDETABLEVAR}| tr '[:lower:]' '[:upper:]')')\""
echo ${EXCLUDETABLE} >> ${PARFILE}; break ;;
esac
done
echo
echo "Enter the CONTENT of data you want to Export:"
echo "============================================="
echo "1. DATA+METADATA [DEFAULT]"
echo "2. METADATA_ONLY [DDL]"
echo "3. DATA_ONLY"
echo ""
while read CONTENTVAR
do
case ${CONTENTVAR} in
""|"DATA+METADATA"|1) echo; echo "EXPORT MODE IS SET TO: [DATA + METADATA]"; echo; break ;;
"METADATA_ONLY"|"metadata_only"|"METADATA"|"metadata"|"DDL"|"ddl"|2) echo; export CONTENTVAR="CONTENT=METADATA_ONLY"; echo ${CONTENTVAR} >> ${PARFILE}; echo "EXPORT MODE IS SET TO: [METADATA_ONLY]"; echo; break ;;
"DATA_ONLY"|"data_only"|"DATA"|"data"|3) echo; export CONTENTVAR="CONTENT=DATA_ONLY"; echo ${CONTENTVAR} >> ${PARFILE}; echo "EXPORT MODE IS SET TO: [DATA_ONLY]"; echo; break ;;
*) echo; echo "Enter a correct option number between 1 to 3:"
echo "--------------------------------------------";;
esac
done
echo
echo "Enter the VERSION: [In case you want to import this dump later on a DB with LOWER version] | [Allowed value start from 9.2 and above] "
echo "================="
echo "e.g. If you will import this dump on a 10g DB then enter 10"
echo "For DEFAULT compatibility leave it BLANK."
echo ""
while read VERSION
do
case ${VERSION} in
""|"COMPATIBLE"|"compatible") echo; echo "DUMPFILE COMPATIBILITY version is set to the current DB compatibility level."; echo; break ;;
[0-9]) echo; echo "Wrong version number, this value cannot be set lower than 9.2!"
echo; echo "Enter a correct version higher than 9.2:"
echo "----------------------------------------";;
*) echo; echo "DUMPFILE COMPATIBILITY version is set to ${VERSION}."; VERSION="VERSION=${VERSION}"; echo ${VERSION} >> ${PARFILE}; echo; break ;;
esac
done
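## e.g. entering 10.2 writes VERSION=10.2 to the PARFILE, so the dump can later be imported
## by a 10.2 impdp/database; leaving it blank keeps the default COMPATIBLE behavior.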
echo
echo "You are almost done!"; echo
sleep 1
echo "Please verify the export settings summary:"
echo "------------------------------------------"
cat ${PARFILE}
echo
sleep 1
echo "Shall we start the EXPORT job now? [[YES] | NO]"
echo "=================================="
while read STARTNOW
do
case ${STARTNOW} in
N|n|NO|no) echo; echo "SCRIPT TERMINATED! "; echo; exit;;
""|Y|y|YES|yes) echo; echo "STARTING THE IMPORT ..."; echo; break;;
*) echo "Please enter a valid answer: [YES|NO]";;
esac
done
cd ${LOC1}
SPOOLFILE2=${LOC1}/AFTER_IMPORT_DATABASE_${ORACLE_SID}_${DUMPDATE}.sql
echo "Creating the Exporter User DBA_BUNDLEEXP7 ..."
echo "Preparing the BEFORE and AFTER import script which will help you import the dump file later ..."
VAL11=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
PROMPT CREATE USER DBA_BUNDLEEXP7 [EXPORTER USER] (WILL BE DROPPED AFTER THE EXPORT) ...
CREATE USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}";
ALTER USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}" ACCOUNT UNLOCK;
GRANT CREATE SESSION TO DBA_BUNDLEEXP7;
GRANT DBA TO DBA_BUNDLEEXP7;
-- The following privileges to workaround Bug 6392040:
GRANT EXECUTE ON SYS.DBMS_DEFER_IMPORT_INTERNAL TO DBA_BUNDLEEXP7;
GRANT EXECUTE ON SYS.DBMS_EXPORT_EXTENSION TO DBA_BUNDLEEXP7;
PROMPT
PROMPT CREATING DIRECTORY EXPORT_FILES_DBA_BUNDLE POINTING TO ${LOC1} ...
CREATE OR REPLACE DIRECTORY EXPORT_FILES_DBA_BUNDLE AS '${LOC1}';
PROMPT
PROMPT CREATING AFTER DATABASE IMPORT SCRIPT ...
PROMPT
SET PAGES 0 TERMOUT OFF LINESIZE 157 ECHO OFF FEEDBACK OFF
SPOOL ${SPOOLFILE2}
SELECT 'PROMPT ' FROM DUAL;
SELECT 'PROMPT COMPILING DATABASE INVALID OBJECTS ...' FROM DUAL;
SELECT '@?/rdbms/admin/utlrp' FROM DUAL;
SELECT '@?/rdbms/admin/utlrp' FROM DUAL;
SELECT 'PROMPT ' FROM DUAL;
SELECT 'PROMPT THE FOLLOWING TRIGGERS ARE OWNED BY THE SYS SCHEMA AND MAY NOT EXIST AFTER THE IMPORT' FROM DUAL;
SELECT 'PROMPT YOU MAY CONSIDER CREATING THE MISSING TRIGGERS IF YOU NEED THEM:' FROM DUAL;
SELECT 'PROMPT ***************************************************************' FROM DUAL;
SELECT 'PROMPT '||TRIGGER_TYPE||' TRIGGER: '||TRIGGER_NAME FROM DBA_TRIGGERS WHERE OWNER=UPPER('SYS') ORDER BY 1;
SELECT 'PROMPT ' FROM DUAL;
SELECT 'PROMPT CHECK THAT THESE DIRECTORIES ARE POINTING TO THE RIGHT PATHS: ' FROM DUAL;
SELECT 'PROMPT *********************************************************** ' FROM DUAL;
COL DIRECTORY FOR A50
COL DIRECTORY_PATH FOR A100
SELECT 'PROMPT '||OWNER||'.'||DIRECTORY_NAME||': '||DIRECTORY_PATH FROM DBA_DIRECTORIES;
SELECT 'PROMPT ' FROM DUAL;
SPOOL OFF
EOF
)
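## Note: the SELECT 'PROMPT ...' FROM DUAL statements above simply spool literal lines into
## ${SPOOLFILE2}, building a ready-to-run "after import" script; utlrp is spooled twice,
## presumably so that objects depending on other invalid objects get recompiled on the second pass.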
echo
# Creation of the Export Script:
export EXPORTSCRIPT=${LOC1}/EXPORTSCRIPT.sh
export EXPORTSCRIPTRUNNER=${LOC1}/EXPORTSCRIPTRUNNER.sh
echo "# Export Script: [Created By DBA_BUNDLE]" > ${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"While the Export job is running, you can check the STATUS using:\"" >>${EXPORTSCRIPT}
echo "echo \"--------------------------------------------------------------- \"" >>${EXPORTSCRIPT}
echo "echo \"SELECT job_name, operation, job_mode, DEGREE, state FROM dba_datapump_jobs where OPERATION='EXPORT' and state='EXECUTING' and owner_name='DBA_BUNDLEEXP7';\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"Then you can ATTACH to the export job and control it using:\"" >>${EXPORTSCRIPT}
echo "echo \"---------------------------------------------------------- \"" >>${EXPORTSCRIPT}
echo "echo \"expdp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" ATTACH=<JOB_NAME_FROM_ABOVE_COMMAND>\"" >>${EXPORTSCRIPT}
echo "echo \"i.e.\"" >>${EXPORTSCRIPT}
echo "echo \"expdp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" ATTACH=SYS_EXPORT_FULL_01\"" >>${EXPORTSCRIPT}
echo "echo \"To Show the STATUS:....... STATUS\"" >>${EXPORTSCRIPT}
echo "echo \"To KILL the export:....... KILL_JOB\"" >>${EXPORTSCRIPT}
echo "echo \"To PAUSE the export:...... STOP_JOB\"" >>${EXPORTSCRIPT}
echo "echo \"To RESUME a paused export: START_JOB\"" >>${EXPORTSCRIPT}
echo "export ORACLE_SID=${ORACLE_SID}" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running The Export Job Now ...'" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/expdp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" PARFILE=${PARFILE}" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running Post Export Steps ...'" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF" >>${EXPORTSCRIPT}
echo "PROMPT" >>${EXPORTSCRIPT}
echo "PROMPT DROPPING THE EXPORTER USER DBA_BUNDLEEXP7 ..." >>${EXPORTSCRIPT}
echo "DROP USER DBA_BUNDLEEXP7 CASCADE;" >>${EXPORTSCRIPT}
echo "EOF" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"IMPORT GUIDELINES:\"" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"FLASHBACK SCN used for this export is: ${CURRENT_SCN}\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"Later, AFTER IMPORTING THE DUMPFILE, RUN THIS SQL SCRIPT: ${SPOOLFILE2}\"" >>${EXPORTSCRIPT}
echo "echo \" => IT INCLUDES (HINT FOR TRIGGERS OWNED BY SYS) WHICH WILL NOT BE CREATED BY THE IMPORT PROCESS + COMPILING INVALID OBJECTS.\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"*************************\"" >>${EXPORTSCRIPT}
echo "echo \"EXPORT DUMP FILE LOCATION: ${LOC1}/${DUMPFILENAME}\"" >>${EXPORTSCRIPT}
echo "echo \"*************************\"" >>${EXPORTSCRIPT}
echo "export JOBSTATUS=\`grep \"successfully\\|stopped\\|completed\" ${LOGFILE}|tail -1\`" >>${EXPORTSCRIPT}
echo "export LOGFILE=${LOGFILE}" >>${EXPORTSCRIPT}
echo "export EMAILID=\"${EMAILANS}\"" >>${EXPORTSCRIPT}
echo "${SENDEMAIL}" >>${EXPORTSCRIPT}
chmod 740 ${EXPORTSCRIPT}
echo
echo "#!/bin/bash" > ${EXPORTSCRIPTRUNNER}
echo "nohup sh ${EXPORTSCRIPT}| tee ${LOGFILE} 2>&1 &" >>${EXPORTSCRIPTRUNNER}
chmod 740 ${EXPORTSCRIPTRUNNER}
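## The runner launches the export with nohup in the background while tee mirrors its output
## to the log, so closing the session won't kill the job; to monitor it later just run:
# tail -f ${LOGFILE}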
echo -e "\033[32;5mFeel free to EXIT from this session as the EXPORT SCRIPT is running in the BACKGROUND.\033[0m"
source ${EXPORTSCRIPTRUNNER}
## Export METADATA ONLY: <using Legacy EXP because it's more reliable than EXPDP in exporting DDLs>
#echo;echo "CREATING A FILE CONTAINS ALL CREATION [DDL] STATEMENT OF ALL USERS|OBJECTS ...";sleep 1
#${ORACLE_HOME}/bin/exp DBA_BUNDLEEXP7/"BUNdle_#-^${PASSHALF}" FULL=y ROWS=N STATISTICS=NONE FILE=${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.dmp log=${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.log
## Getting READABLE export script: [DUMP REFINING]
#/usr/bin/strings ${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.dmp > ${LOC1}/${ORACLE_SID}_METADATA_REFINED_${DUMPDATE}.trc
echo; exit ;;
# In case the user chooses to export the FULL database using the legacy EXP tool:
"2"|"LEGACY EXPORT"|"LEGACY"|"EXPORT"|"LEGACY EXPORT [EXP]"|"EXP"|"[EXP]"|"exp"|"legacy export"|"legacy"|"export")
echo
printf "`echo "Do you want to enable the COMPRESSION [Y|N] [N] [COMPRESSION will happen on the fly using mknod] | "` `echo -e "\033[33;5mNo License required\033[0m"` `echo "]"`\n"
echo "====================================="
while read COMP_ANS
do
case $COMP_ANS in
y|Y|yes|YES|Yes) echo;export EXPORTDUMP="${LOC1}/EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}_pipe.dmp"
export MKNOD="rm -f ${EXPORTDUMP}; mknod ${EXPORTDUMP} p"
export ZIP="nohup bzip2 -fz < ${EXPORTDUMP} > ${LOC1}/EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}.dmp.bz2 &"
export EXPORTDUMPOUTPUT="${LOC1}/EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}.dmp.bz2"
export REMOVEMKDON="rm -f ${EXPORTDUMP}"
export UNZIPMESSAGE="First DE-COMPRESS the file using this command: bunzip2 -f ${EXPORTDUMPOUTPUT}"
echo -e "\033[32;5mCompression Enabled.\033[0m";echo; break ;;
""|n|N|no|NO|No) echo;export MKNOD=""
export ZIP=""
export EXPORTDUMP="${LOC1}/EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}.dmp"
export EXPORTDUMPOUTPUT="${LOC1}/EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}.dmp";break ;;
*) echo;echo "Please Enter a Valid Answer [Y|N]"
echo "---------------------------------";;
esac
done
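## A minimal standalone sketch of the named-pipe compression trick used above
## [paths are illustrative; assumes bzip2 is installed]:
# rm -f /tmp/exp_pipe.dmp; mknod /tmp/exp_pipe.dmp p         # create a FIFO (named pipe)
# nohup bzip2 -fz < /tmp/exp_pipe.dmp > /tmp/exp.dmp.bz2 &   # compressor reads from the FIFO
# exp ... FILE=/tmp/exp_pipe.dmp                             # exp writes into the FIFO
# rm -f /tmp/exp_pipe.dmp                                    # drop the FIFO when done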
echo
echo "EXPORTING DATABASE $ORACLE_SID [USING LEGACY EXP] ..."
sleep 1
cd ${LOC1}
SPOOLFILE2=${LOC1}/AFTER_IMPORT_DATABASE_${ORACLE_SID}_${DUMPDATE}.sql
echo "Creating the Exporter User DBA_BUNDLEEXP7 ..."
echo "Preparing the BEFORE and AFTER import script which will help you import the dump file later ..."
VAL11=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
PROMPT CREATE USER DBA_BUNDLEEXP7 [EXPORTER USER] (WILL BE DROPPED AFTER THE EXPORT) ...
CREATE USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}" ACCOUNT UNLOCK;
ALTER USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}" ACCOUNT UNLOCK;
GRANT CREATE SESSION TO DBA_BUNDLEEXP7;
GRANT EXP_FULL_DATABASE TO DBA_BUNDLEEXP7;
GRANT EXECUTE ON SYS.DBMS_FLASHBACK TO DBA_BUNDLEEXP7;
-- The following privileges to workaround Bug 6392040:
GRANT EXECUTE ON SYS.DBMS_DEFER_IMPORT_INTERNAL TO DBA_BUNDLEEXP7;
GRANT EXECUTE ON SYS.DBMS_EXPORT_EXTENSION TO DBA_BUNDLEEXP7;
PROMPT
PROMPT CREATING DIRECTORY EXPORT_FILES_DBA_BUNDLE POINTING TO ${LOC1} ...
CREATE OR REPLACE DIRECTORY EXPORT_FILES_DBA_BUNDLE AS '${LOC1}';
PROMPT
PROMPT CREATING AFTER DATABASE IMPORT SCRIPT ...
PROMPT
SET PAGES 0 TERMOUT OFF LINESIZE 157 ECHO OFF FEEDBACK OFF
SPOOL ${SPOOLFILE2}
SELECT 'PROMPT COMPILING DATABASE INVALID OBJECTS ...' FROM DUAL;
SELECT '@?/rdbms/admin/utlrp' FROM DUAL;
SELECT '@?/rdbms/admin/utlrp' FROM DUAL;
SELECT 'PROMPT ' FROM DUAL;
SELECT 'PROMPT THE FOLLOWING TRIGGERS ARE OWNED BY THE SYS SCHEMA AND MAY NOT EXIST AFTER THE IMPORT' FROM DUAL;
SELECT 'PROMPT YOU MAY CONSIDER CREATING THE MISSING TRIGGERS IF YOU NEED THEM:' FROM DUAL;
SELECT 'PROMPT ***************************************************************' FROM DUAL;
SELECT 'PROMPT '||TRIGGER_TYPE||' TRIGGER: '||TRIGGER_NAME FROM DBA_TRIGGERS WHERE OWNER=UPPER('SYS') ORDER BY 1;
SELECT 'PROMPT ARE THESE DIRECTORIES POINTING TO THE RIGHT PATHS? ' FROM DUAL;
COL DIRECTORY FOR A50
COL DIRECTORY_PATH FOR A100
SELECT 'PROMPT '||OWNER||'.'||DIRECTORY_NAME||': '||DIRECTORY_PATH FROM DBA_DIRECTORIES;
SPOOL OFF
EOF
)
# Creation of the Post Export Script:
export DUMPFILENAME="EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}${PARA}.dmp"
export LOGFILE="${LOC1}/EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}.log"
export EXPORTSCRIPT=${LOC1}/EXPORTSCRIPT.sh
export EXPORTSCRIPTRUNNER=${LOC1}/EXPORTSCRIPTRUNNER.sh
echo "# Export Script: [Created By DBA_BUNDLE]" > ${EXPORTSCRIPT}
echo "export ORACLE_SID=${ORACLE_SID}" >>${EXPORTSCRIPT}
echo "echo 'Running The Export Job Now ...'" >>${EXPORTSCRIPT}
echo "${MKNOD}" >>${EXPORTSCRIPT}
echo "sleep 1" >>${EXPORTSCRIPT}
echo "${ZIP}" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/exp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" FULL=y DIRECT=y CONSISTENT=y STATISTICS=NONE FEEDBACK=100000 ${EXPORTSCN} RESUMABLE=y RESUMABLE_NAME=DBA_BUNDLE_EXPORT RESUMABLE_TIMEOUT=86400 FILE=${EXPORTDUMP} log=${LOC1}/EXPORT_FULL_DB_${ORACLE_SID}_${DUMPDATE}.log" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running Post Export Steps ...'" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF" >>${EXPORTSCRIPT}
echo "PROMPT" >>${EXPORTSCRIPT}
echo "PROMPT DROPPING THE EXPORTER USER DBA_BUNDLEEXP7 ..." >>${EXPORTSCRIPT}
echo "DROP USER DBA_BUNDLEEXP7 CASCADE;" >>${EXPORTSCRIPT}
echo "EOF" >>${EXPORTSCRIPT}
echo "sleep 3" >>${EXPORTSCRIPT}
echo "${REMOVEMKDON}" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"IMPORT GUIDELINES:\"" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"FLASHBACK SCN used for this export is: ${CURRENT_SCN}\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"${UNZIPMESSAGE}\"" >>${EXPORTSCRIPT}
echo "echo \"Later, AFTER IMPORTING THE DUMPFILE, RUN THIS SQL SCRIPT: ${SPOOLFILE2}\"" >>${EXPORTSCRIPT}
echo "echo \" => IT INCLUDES (HINT FOR TRIGGERS OWNED BY SYS) WHICH WILL NOT BE CREATED BY THE IMPORT PROCESS + COMPILING INVALID OBJECTS.\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "echo \"EXPORT DUMP FILE LOCATION: ${EXPORTDUMPOUTPUT}\"" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "export JOBSTATUS=\`grep \"successfully\\|stopped\\|completed\" ${LOGFILE}|tail -1\`" >>${EXPORTSCRIPT}
echo "export LOGFILE=${LOGFILE}" >>${EXPORTSCRIPT}
echo "export EMAILID=\"${EMAILANS}\"" >>${EXPORTSCRIPT}
echo "${SENDEMAIL}" >>${EXPORTSCRIPT}
chmod 740 ${EXPORTSCRIPT}
echo
echo "#!/bin/bash" > ${EXPORTSCRIPTRUNNER}
echo "nohup sh ${EXPORTSCRIPT}| tee ${LOGFILE} 2>&1 &" >>${EXPORTSCRIPTRUNNER}
chmod 740 ${EXPORTSCRIPTRUNNER}
echo -e "\033[32;5mFeel free to EXIT from this session as the EXPORT SCRIPT is running in the BACKGROUND.\033[0m"
source ${EXPORTSCRIPTRUNNER}
## Export METADATA ONLY: <using Legacy EXP because it's more reliable than EXPDP in exporting DDLs>
#echo
#echo "CREATING A FILE CONTAINS ALL CREATION [DDL] STATEMENT OF ALL USERS|OBJECTS ..."
#sleep 1
#${ORACLE_HOME}/bin/exp DBA_BUNDLEEXP7/"BUNdle_#-^${PASSHALF}" FULL=y ROWS=N STATISTICS=NONE FILE=${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.dmp log=${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.log
## Removing Extra Bad characters: [DUMP REFINING]
#/usr/bin/strings ${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.dmp > ${LOC1}/${ORACLE_SID}_METADATA_REFINED_${DUMPDATE}.trc
#echo
#echo "EXTRA FILES:"
#echo "-----------"
#echo "METADATA ONLY DUMP FILE <IMPORTABLE with [legacy exp utility]>: ${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.dmp"
#echo "DDL Script FILE <READABLE | Cannot be Imported>: ${LOC1}/${ORACLE_SID}_METADATA_REFINED_${DUMPDATE}.trc"
#echo "*****************************************************************"
echo; exit ;;
*) echo "Enter a valid number:"
echo "====================="
echo "i.e."
echo "1 for expdp tool"
echo "2 for exp tool"
echo ;;
esac
done
break;;
2|"EXPORT SCHEMAS"|"database"|"DATABASE"|"schema"|"schemas"|"SCHEMA"|"SCHEMAS")
echo
echo "Entering EXPORT SCHEMA MODE ..."
sleep 1
# ######################
# EXPORT SCHEMA SECTION:
# ######################
echo
echo "WHICH EXPORT UTILITY YOU WANT TO USE: [1) DATAPUMP [EXPDP]]"
echo "===================================="
echo "1) DATAPUMP [EXPDP] |Pros: Faster when import, Cloud/PARALLELISM compatible, can Exclude schema/tables |Cons: COMPRESSION requires license"
echo "2) LEGACY EXPORT [EXP] |Pros: COMPRESSION can happen on the fly without license |Cons: Slower when import, No Cloud/PARALLELISM compatibility"
while read EXP_TOOL
do
case $EXP_TOOL in
""|"1"|"DATAPUMP"|"datapump"|"DATAPUMP [EXPDP]"|"[EXPDP]"|"EXPDP"|"expdp")
if [[ ${CHK_PARALLELISM_OPTION} =~ ${INT} ]]
then
if [ ${CHK_PARALLELISM_OPTION} -eq 1 ]
then
echo
echo "Enter the PARALLEL DEGREE you want to perform the export with PARALLELISM? [If used, The final dump file will be divided into multiple files!]"
echo "========================================================================="
echo "[Current CPU Count on this Server is: ${PARALLEL_DEGREE}]"
echo "Enter a number bigger than 1 to utilize PARALLELISM or enter 0 to disable PARALLELISM"
echo ""
while read PARALLEL_ANS
do
# Check if the input is an integer:
if [[ -z ${PARALLEL_ANS} ]]; then
export PARALLEL_ANS=0
fi
if [[ ${PARALLEL_ANS} =~ ${INT} ]]
then
# Check if the input is greater than 1:
if [ "${PARALLEL_ANS}" -gt 1 ]
then
export PARALLEL="PARALLEL=${PARALLEL_ANS}"
export PARA="_%u"
echo -e "\033[32;5mPARALLELISM ENABLED | The final dump file will be divided into multiple files based on the degree of parallelism you used.\033[0m"
echo
else
echo "PARALLELISM DISABLED.";echo ""
fi
break
fi
done
else
echo;echo -e "\033[32;5mPARALLELISM option is not available in the current Database Edition.\033[0m"
fi
fi
# PARAMETER FILE CREATION:
export DUMPFILENAME="EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}${PARA}.dmp"
export LOGFILE="${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.log"
PARFILE=${LOC1}/EXPORT_SCHEMA_DB_${ORACLE_SID}.par
echo "# SCHEMA EXPORT PARAMETER FILE CREATED BY export_data.sh SCRIPT on [${DUMPDATE}]: [${ORACLE_SID}]" > ${PARFILE}
echo "DIRECTORY=EXPORT_FILES_DBA_BUNDLE" >> ${PARFILE}
echo "DUMPFILE=${DUMPFILENAME}" >> ${PARFILE}
echo "LOGFILE=EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.log" >> ${PARFILE}
echo "${EXPORTSCN}" >> ${PARFILE}
echo "${REUSE_DUMP}" >> ${PARFILE}
echo "${PARALLEL}" >> ${PARFILE}
echo
echo "Enter the SCHEMAS you want to export, separating them by comma:"
echo "=============================================================="
echo "i.e. HR,SCOTT,OE"
echo ""
while read SCHEMASVAR
do
case ${SCHEMASVAR} in
"") echo; echo "Please Enter the Schema Name you want to export:"
echo "-----------------------------------------------"
echo "i.e. SCOTT,HR,OE"
echo "";;
*) echo
# Convert User's input into UPPERCASE:
export SCHEMASVAR="$(echo ${SCHEMASVAR}| tr [:lower:] [:upper:])"
export SCHEMA="SCHEMAS=${SCHEMASVAR}"
echo ${SCHEMA} >> ${PARFILE}
export SCHEMALIST="'$(sed s/,/\',\'/g <<<${SCHEMASVAR}| tr '[:lower:]' '[:upper:]')'"; break ;;
esac
done
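## e.g. entering "hr,scott" above writes SCHEMAS=HR,SCOTT to the PARFILE and sets SCHEMALIST
## to 'HR','SCOTT' for the SQL IN-lists used by the import-helper scripts below.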
echo ""
echo "Enter the TABLES you want to EXCLUDE from the export, separating them by comma:"
echo "==============================================================================="
echo "i.e. EMP,DEPT"
echo "[Leave it BLANK and hit Enter if you do NOT want to exclude any TABLES]"
echo ""
while read EXCLUDETABLEVAR
do
case ${EXCLUDETABLEVAR} in
"") echo; export EXCLUDETABLE=""; break ;;
*) echo; export EXCLUDETABLE="EXCLUDE=TABLE:\"IN('$(sed s/,/\',\'/g <<<${EXCLUDETABLEVAR}| tr '[:lower:]' '[:upper:]')')\""
echo ${EXCLUDETABLE} >> ${PARFILE}; break ;;
esac
done
echo
printf "`echo "Do you want to enable the COMPRESSION [Y|N] [N] [Do NOT answer with YES unless you already acquired the"` `echo -e "\033[33;5mAdvanced Compression License\033[0m"` `echo "]"`\n"
echo "====================================="
while read COMP_ANS
do
case $COMP_ANS in
y|Y|yes|YES|Yes) echo;echo "COMPRESSION=ALL" >> ${PARFILE};echo -e "\033[32;5mCompression Enabled.\033[0m";echo; break ;;
""|n|N|no|NO|No) echo; echo "COMPRESSION DISABLED."; echo; break ;;
*) echo;echo "Please Enter a Valid Answer: [Y|N]"
echo "----------------------------";;
esac
done
echo
echo "Enter the CONTENT of data you want to Export:"
echo "============================================="
echo "1. DATA+METADATA [DEFAULT]"
echo "2. METADATA_ONLY [DDL]"
echo "3. DATA_ONLY"
echo ""
while read CONTENTVAR
do
case ${CONTENTVAR} in
""|"DATA+METADATA"|1) echo; echo "EXPORT MODE IS SET TO: [DATA + METADATA]"; echo; break ;;
"METADATA_ONLY"|"metadata_only"|"METADATA"|"metadata"|"DDL"|"ddl"|2) echo; export CONTENTVAR="CONTENT=METADATA_ONLY"; echo ${CONTENTVAR} >> ${PARFILE}; echo "EXPORT MODE IS SET TO: [METADATA_ONLY]"; echo; break ;;
"DATA_ONLY"|"data_only"|"DATA"|"data"|3) echo; export CONTENTVAR="CONTENT=DATA_ONLY"; echo ${CONTENTVAR} >> ${PARFILE}; echo "EXPORT MODE IS SET TO: [DATA_ONLY]"; echo; break ;;
*) echo; echo "Enter a correct option number between 1 to 3:"
echo "--------------------------------------------";;
esac
done
echo
echo "Enter the VERSION: [In case you want to import this dump later on a DB with LOWER version] | [Allowed value start from 9.2 and above] "
echo "================="
echo "e.g. If you will import this dump on a 10g DB then enter 10"
echo "For DEFAULT compatibility leave it BLANK."
echo ""
while read VERSION
do
case ${VERSION} in
""|"COMPATIBLE"|"compatible") echo; echo "DUMPFILE COMPATIBILITY version is set to the current DB compatibility level."; echo; break ;;
[0-9]) echo; echo "Wrong version number, this value cannot be set lower than 9.2!"
echo; echo "Enter a correct version higher than 9.2:"
echo "----------------------------------------";;
*) echo; echo "DUMPFILE COMPATIBILITY version is set to ${VERSION}."; VERSION="VERSION=${VERSION}"; echo ${VERSION} >> ${PARFILE}; echo; break ;;
esac
done
echo
echo "You are almost done!"; echo
sleep 1
echo "Please verify the export settings summary:"
echo "------------------------------------------"
cat ${PARFILE}
echo
sleep 1
echo "Shall we start the EXPORT job now? [[YES] | NO]"
echo "=================================="
while read STARTNOW
do
case ${STARTNOW} in
N|n|NO|no) echo; echo "SCRIPT TERMINATED! "; echo; exit;;
""|Y|y|YES|yes) echo; echo "STARTING THE IMPORT ..."; echo; break;;
*) echo "Please enter a valid answer: [YES|NO]";;
esac
done
cd ${LOC1}
SPOOLFILE1=${LOC1}/BEFORE_IMPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.sql
SPOOLFILE2=${LOC1}/AFTER_IMPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.sql
echo "Creating the Exporter User DBA_BUNDLEEXP7 ..."
echo "Preparing the BEFORE and AFTER import script which will help you import the dump file later ..."
VAL11=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
PROMPT CREATE USER DBA_BUNDLEEXP7 [EXPORTER USER] (WILL BE DROPPED AFTER THE EXPORT) ...
CREATE USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}";
ALTER USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}" ACCOUNT UNLOCK;
GRANT CREATE SESSION TO DBA_BUNDLEEXP7;
GRANT DBA TO DBA_BUNDLEEXP7;
-- The following privileges to workaround Bug 6392040:
GRANT EXECUTE ON SYS.DBMS_DEFER_IMPORT_INTERNAL TO DBA_BUNDLEEXP7;
GRANT EXECUTE ON SYS.DBMS_EXPORT_EXTENSION TO DBA_BUNDLEEXP7;
PROMPT
PROMPT CREATING DIRECTORY EXPORT_FILES_DBA_BUNDLE POINTING TO ${LOC1} ...
CREATE OR REPLACE DIRECTORY EXPORT_FILES_DBA_BUNDLE AS '${LOC1}';
PROMPT
PROMPT CREATING BEFORE SCHEMA IMPORT SCRIPT ...
PROMPT
SET PAGES 0 TERMOUT OFF LINESIZE 157 ECHO OFF FEEDBACK OFF
SPOOL ${SPOOLFILE1}
SELECT 'CREATE USER ' || u.username ||' IDENTIFIED ' ||' BY VALUES ''' || c.password || ''' DEFAULT TABLESPACE ' || u.default_tablespace ||' TEMPORARY TABLESPACE ' || u.temporary_tablespace ||' PROFILE ' || u.profile || case when account_status= 'OPEN' then ';' else ' Account LOCK;' end "--Creation Statement"
FROM dba_users u,user$ c where u.username=c.name and u.username in (${SCHEMALIST})
UNION
SELECT 'CREATE ROLE '||GRANTED_ROLE||';' FROM DBA_ROLE_PRIVS WHERE GRANTEE in (${SCHEMALIST})
UNION
select 'GRANT '||GRANTED_ROLE||' TO '||GRANTEE|| case when ADMIN_OPTION='YES' then ' WITH ADMIN OPTION;' else ';' end "Granted Roles"
from dba_role_privs where grantee in (${SCHEMALIST})
UNION
select 'GRANT '||PRIVILEGE||' TO '||GRANTEE|| case when ADMIN_OPTION='YES' then ' WITH ADMIN OPTION;' else ';' end "Granted System Privileges"
from dba_sys_privs where grantee in (${SCHEMALIST})
UNION
select 'GRANT '||PRIVILEGE||' ON '||OWNER||'.'||TABLE_NAME||' TO '||GRANTEE||case when GRANTABLE='YES' then ' WITH GRANT OPTION;' else ';' end "Granted Object Privileges" from DBA_TAB_PRIVS where GRANTEE in (${SCHEMALIST});
SPOOL OFF
PROMPT CREATING AFTER SCHEMA IMPORT SCRIPT ...
PROMPT
SPOOL ${SPOOLFILE2}
select 'GRANT '||PRIVILEGE||' ON '||OWNER||'.'||TABLE_NAME||' TO '||GRANTEE||case when GRANTABLE='YES' then ' WITH GRANT OPTION;' else ';' end "Granted Object Privileges" from DBA_TAB_PRIVS where OWNER in (${SCHEMALIST})
UNION
SELECT 'CREATE PUBLIC SYNONYM '||SYNONYM_NAME||' FOR '||TABLE_OWNER||'.'||TABLE_NAME||';' FROM DBA_SYNONYMS WHERE TABLE_OWNER in (${SCHEMALIST}) AND OWNER=UPPER('PUBLIC');
PROMPT
SELECT 'PROMPT COMPILING DATABASE INVALID OBJECTS ...' FROM DUAL;
SELECT '@?/rdbms/admin/utlrp' FROM DUAL;
SELECT '@?/rdbms/admin/utlrp' FROM DUAL;
SELECT 'PROMPT ' FROM DUAL;
SELECT 'PROMPT THE FOLLOWING TRIGGERS ARE OWNED BY OTHER USERS BUT ARE DEPENDENT ON THE EXPORTED SCHEMAS OBJECTS' FROM DUAL;
SELECT 'PROMPT YOU MAY CONSIDER CREATING THEM AFTER THE SCHEMA IMPORT IF YOU NEED THEM:' FROM DUAL;
SELECT 'PROMPT **********************************************************************' FROM DUAL;
SELECT 'PROMPT '||TRIGGER_TYPE||' TRIGGER: '||OWNER||'.'||TRIGGER_NAME||' =>ON TABLE: '||TABLE_OWNER||'.'||TABLE_NAME FROM DBA_TRIGGERS WHERE TABLE_OWNER IN (${SCHEMALIST}) AND OWNER NOT IN (${SCHEMALIST}) ORDER BY 1;
SPOOL OFF
EOF
)
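## Note: the CREATE USER ... IDENTIFIED BY VALUES '<hash>' statements spooled above recreate
## users with their original password hashes without knowing the clear-text passwords, e.g.
## [hash value is purely illustrative]:
# CREATE USER SCOTT IDENTIFIED BY VALUES 'F894844C34402B67' DEFAULT TABLESPACE USERS;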
echo
# Creation of the Export Script:
export EXPORTSCRIPT=${LOC1}/EXPORTSCRIPT.sh
export EXPORTSCRIPTRUNNER=${LOC1}/EXPORTSCRIPTRUNNER.sh
echo "# Export Script: [Created By DBA_BUNDLE]" > ${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"While the Export job is running, you can check the STATUS using:\"" >>${EXPORTSCRIPT}
echo "echo \"--------------------------------------------------------------- \"" >>${EXPORTSCRIPT}
echo "echo \"SELECT job_name, operation, job_mode, DEGREE, state FROM dba_datapump_jobs where OPERATION='EXPORT' and state='EXECUTING' and owner_name='DBA_BUNDLEEXP7';\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"Then you can ATTACH to the export job and control it using:\"" >>${EXPORTSCRIPT}
echo "echo \"---------------------------------------------------------- \"" >>${EXPORTSCRIPT}
echo "echo \"expdp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" ATTACH=<JOB_NAME_FROM_ABOVE_COMMAND>\"" >>${EXPORTSCRIPT}
echo "echo \"i.e.\"" >>${EXPORTSCRIPT}
echo "echo \"expdp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" ATTACH=SYS_EXPORT_SCHEMA_01\"" >>${EXPORTSCRIPT}
echo "echo \"To Show the STATUS:....... STATUS\"" >>${EXPORTSCRIPT}
echo "echo \"To KILL the export:....... KILL_JOB\"" >>${EXPORTSCRIPT}
echo "echo \"To PAUSE the export:...... STOP_JOB\"" >>${EXPORTSCRIPT}
echo "echo \"To RESUME a paused export: START_JOB\"" >>${EXPORTSCRIPT}
echo "export ORACLE_SID=${ORACLE_SID}" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running The Export Job Now ...'" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/expdp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" PARFILE=${PARFILE}" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running Post Export Steps ...'" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF" >>${EXPORTSCRIPT}
echo "PROMPT" >>${EXPORTSCRIPT}
echo "PROMPT DROPPING THE EXPORTER USER DBA_BUNDLEEXP7 ..." >>${EXPORTSCRIPT}
echo "DROP USER DBA_BUNDLEEXP7 CASCADE;" >>${EXPORTSCRIPT}
echo "EOF" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"IMPORT GUIDELINES:\"" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"FLASHBACK SCN used for this export is: ${CURRENT_SCN}\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"BEFORE IMPORTING THE DUMPFILE IT'S RECOMMENDED TO RUN THIS SQL SCRIPT: ${SPOOLFILE1}\"" >>${EXPORTSCRIPT}
echo "echo \"It includes (USER|ROLES|GRANTED PRIVILEGES CREATION STATEMENTS), WHICH WILL NOT BE CREATED DURING THE IMPORT PROCESS.\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"AFTER IMPORTING THE DUMPFILE, RUN THIS SQL SCRIPT: ${SPOOLFILE2}\"" >>${EXPORTSCRIPT}
echo "echo \"It includes (Public Synonyms DDLs, Privileges granted to others, Hints for Triggers owned by others but depending on the exported schemas objects) + COMPILING INVALID OBJECTS, SUCH STUFF WILL NOT BE CARRIED OUT BY THE IMPORT PROCESS.\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "echo \"EXPORT DUMP FILE LOCATION: ${LOC1}/${DUMPFILENAME}\"" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "export JOBSTATUS=\`grep \"successfully\\|stopped\\|completed\" ${LOGFILE}|tail -1\`" >>${EXPORTSCRIPT}
echo "export LOGFILE=${LOGFILE}" >>${EXPORTSCRIPT}
echo "export EMAILID=\"${EMAILANS}\"" >>${EXPORTSCRIPT}
echo "${SENDEMAIL}" >>${EXPORTSCRIPT}
chmod 740 ${EXPORTSCRIPT}
echo
echo "#!/bin/bash" > ${EXPORTSCRIPTRUNNER}
echo "nohup sh ${EXPORTSCRIPT}| tee ${LOGFILE} 2>&1 &" >>${EXPORTSCRIPTRUNNER}
chmod 740 ${EXPORTSCRIPTRUNNER}
echo -e "\033[32;5mFeel free to EXIT from this session as the EXPORT SCRIPT is running in the BACKGROUND.\033[0m"
source ${EXPORTSCRIPTRUNNER}
## Export METADATA ONLY: <using Legacy EXP because it's more reliable than EXPDP in exporting DDLs>
#echo;echo "CREATING A FILE CONTAINS ALL CREATION [DDL] STATEMENT OF ALL USERS|OBJECTS ...";sleep 1
#${ORACLE_HOME}/bin/exp DBA_BUNDLEEXP7/"BUNdle_#-^${PASSHALF}" FULL=y ROWS=N STATISTICS=NONE FILE=${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.dmp log=${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.log
## Getting READABLE export script: [DUMP REFINING]
#/usr/bin/strings ${LOC1}/${ORACLE_SID}_METADATA_${DUMPDATE}.dmp > ${LOC1}/${ORACLE_SID}_METADATA_REFINED_${DUMPDATE}.trc
echo; exit ;;
"2"|"LEGACY EXPORT"|"LEGACY"|"EXPORT"|"LEGACY EXPORT [EXP]"|"EXP"|"[EXP]"|"exp"|"legacy export"|"legacy"|"export")
DUMPFILE="${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.dmp"
LOGFILE="${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.log"
echo
echo "Enter the SCHEMAS you want to export, separating them by comma:"
echo "=============================================================="
echo "i.e. HR,SCOTT,OE"
echo ""
while read SCHEMASVAR
do
case ${SCHEMASVAR} in
"") echo; echo "Please Enter the Schema Name you want to export: [i.e. SCOTT,HR,OE]"
echo "-----------------------------------------------";;
*) echo
# Convert User's input into UPPERCASE:
export SCHEMASVAR="$(echo ${SCHEMASVAR}| tr [:lower:] [:upper:])"
export SCHEMALIST="'$(sed s/,/\',\'/g <<<${SCHEMASVAR}| tr '[:lower:]' '[:upper:]')'"; break ;;
esac
done
export EXPORTDUMP="${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.dmp"
export LOGFILE="${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.log"
echo
printf "`echo "Do you want to enable the COMPRESSION [Y|N] [N] [COMPRESSION will happen on the fly using mknod |"` `echo -e "\033[33;5mNo License required\033[0m"` `echo "]"`\n"
echo "====================================="
while read COMP_ANS
do
case $COMP_ANS in
y|Y|yes|YES|Yes) echo;export EXPORTDUMP="${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}_pipe.dmp"
export MKNOD="rm -f ${EXPORTDUMP}; mknod ${EXPORTDUMP} p"
export ZIP="nohup bzip2 -fz < ${EXPORTDUMP} > ${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.dmp.bz2 &"
export EXPORTDUMPOUTPUT="${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.dmp.bz2"
export REMOVEMKDON="rm -f ${EXPORTDUMP}"
export UNZIPMESSAGE="First DE-COMPRESS the file using this command: bunzip2 -f ${EXPORTDUMPOUTPUT}"
echo -e "\033[32;5mCompression Enabled.\033[0m";echo; break ;;
""|n|N|no|NO|No) echo;export MKNOD=""
export ZIP=""
export EXPORTDUMP="${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.dmp"
export EXPORTDUMPOUTPUT="${LOC1}/EXPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.dmp";break ;;
*) echo;echo "Please Enter a Valid Answer [Y|N]"
echo "---------------------------------";;
esac
done
cd ${LOC1}
SPOOLFILE1=${LOC1}/BEFORE_IMPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.sql
SPOOLFILE2=${LOC1}/AFTER_IMPORT_SCHEMA_${ORACLE_SID}_${DUMPDATE}.sql
echo "Creating the Exporter User DBA_BUNDLEEXP7 ..."
echo "Preparing the BEFORE and AFTER import script which will help you import the dump file later ..."
VAL11=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
PROMPT CREATE USER DBA_BUNDLEEXP7 [EXPORTER USER] (WILL BE DROPPED AFTER THE EXPORT) ...
CREATE USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}";
ALTER USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}" ACCOUNT UNLOCK;
GRANT CREATE SESSION TO DBA_BUNDLEEXP7;
GRANT DBA TO DBA_BUNDLEEXP7;
-- The following privileges to workaround Bug 6392040:
GRANT EXECUTE ON SYS.DBMS_DEFER_IMPORT_INTERNAL TO DBA_BUNDLEEXP7;
GRANT EXECUTE ON SYS.DBMS_EXPORT_EXTENSION TO DBA_BUNDLEEXP7;
PROMPT
PROMPT CREATING DIRECTORY EXPORT_FILES_DBA_BUNDLE POINTING TO ${LOC1} ...
CREATE OR REPLACE DIRECTORY EXPORT_FILES_DBA_BUNDLE AS '${LOC1}';
PROMPT
PROMPT CREATING BEFORE SCHEMA IMPORT SCRIPT ...
PROMPT
SET PAGES 0 TERMOUT OFF LINESIZE 157 ECHO OFF FEEDBACK OFF
SPOOL ${SPOOLFILE1}
SELECT 'CREATE USER ' || u.username ||' IDENTIFIED ' ||' BY VALUES ''' || c.password || ''' DEFAULT TABLESPACE ' || u.default_tablespace ||' TEMPORARY TABLESPACE ' || u.temporary_tablespace ||' PROFILE ' || u.profile || case when account_status= 'OPEN' then ';' else ' Account LOCK;' end "--Creation Statement"
FROM dba_users u,user$ c where u.username=c.name and u.username in (${SCHEMALIST})
UNION
SELECT 'CREATE ROLE '||GRANTED_ROLE||';' FROM DBA_ROLE_PRIVS WHERE GRANTEE in (${SCHEMALIST})
UNION
select 'GRANT '||GRANTED_ROLE||' TO '||GRANTEE|| case when ADMIN_OPTION='YES' then ' WITH ADMIN OPTION;' else ';' end "Granted Roles"
from dba_role_privs where grantee in (${SCHEMALIST})
UNION
select 'GRANT '||PRIVILEGE||' TO '||GRANTEE|| case when ADMIN_OPTION='YES' then ' WITH ADMIN OPTION;' else ';' end "Granted System Privileges"
from dba_sys_privs where grantee in (${SCHEMALIST})
UNION
select 'GRANT '||PRIVILEGE||' ON '||OWNER||'.'||TABLE_NAME||' TO '||GRANTEE||case when GRANTABLE='YES' then ' WITH GRANT OPTION;' else ';' end "Granted Object Privileges" from DBA_TAB_PRIVS where GRANTEE in (${SCHEMALIST});
SPOOL OFF
PROMPT CREATING AFTER SCHEMA IMPORT SCRIPT ...
PROMPT
SPOOL ${SPOOLFILE2}
select 'GRANT '||PRIVILEGE||' ON '||OWNER||'.'||TABLE_NAME||' TO '||GRANTEE||case when GRANTABLE='YES' then ' WITH GRANT OPTION;' else ';' end "Granted Object Privileges" from DBA_TAB_PRIVS where OWNER in (${SCHEMALIST})
UNION
SELECT 'CREATE PUBLIC SYNONYM '||SYNONYM_NAME||' FOR '||TABLE_OWNER||'.'||TABLE_NAME||';' FROM DBA_SYNONYMS WHERE TABLE_OWNER in (${SCHEMALIST}) AND OWNER=UPPER('PUBLIC');
PROMPT
SELECT 'PROMPT COMPILING DATABASE INVALID OBJECTS ...' FROM DUAL;
SELECT '@?/rdbms/admin/utlrp' FROM DUAL;
SELECT '@?/rdbms/admin/utlrp' FROM DUAL;
SELECT 'PROMPT ' FROM DUAL;
SELECT 'PROMPT THE FOLLOWING TRIGGERS ARE OWNED BY OTHER USERS BUT ARE DEPENDENT ON THE EXPORTED SCHEMA OBJECTS' FROM DUAL;
SELECT 'PROMPT YOU MAY CONSIDER CREATING THEM AFTER THE SCHEMA IMPORT IF YOU NEED THEM:' FROM DUAL;
SELECT 'PROMPT **********************************************************************' FROM DUAL;
SELECT 'PROMPT '||TRIGGER_TYPE||' TRIGGER: '||OWNER||'.'||TRIGGER_NAME||' =>ON TABLE: '||TABLE_OWNER||'.'||TABLE_NAME FROM DBA_TRIGGERS WHERE TABLE_OWNER in (${SCHEMALIST}) AND OWNER not in (${SCHEMALIST}) ORDER BY 1;
SPOOL OFF
EOF
)
# Creation of the Post Export Script:
export EXPORTSCRIPT=${LOC1}/EXPORTSCRIPT.sh
export EXPORTSCRIPTRUNNER=${LOC1}/EXPORTSCRIPTRUNNER.sh
echo "# Export Script: [Created By DBA_BUNDLE]" > ${EXPORTSCRIPT}
echo "export ORACLE_SID=${ORACLE_SID}" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running The Export Job Now ...'" >>${EXPORTSCRIPT}
echo "${MKNOD}" >>${EXPORTSCRIPT}
echo "sleep 1" >>${EXPORTSCRIPT}
echo "${ZIP}" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/exp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" OWNER=${SCHEMASVAR} DIRECT=y CONSISTENT=y STATISTICS=NONE FEEDBACK=100000 ${EXPORTSCN} RESUMABLE=y RESUMABLE_NAME=DBA_BUNDLE_EXPORT RESUMABLE_TIMEOUT=86400 FILE=${EXPORTDUMP} log=${LOGFILE}">>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running Post Export Steps ...'" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF" >>${EXPORTSCRIPT}
echo "PROMPT" >>${EXPORTSCRIPT}
echo "PROMPT DROPPING THE EXPORTER USER DBA_BUNDLEEXP7 ..." >>${EXPORTSCRIPT}
echo "DROP USER DBA_BUNDLEEXP7 CASCADE;" >>${EXPORTSCRIPT}
echo "EOF" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "sleep 3" >>${EXPORTSCRIPT}
echo "${REMOVEMKDON}" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"IMPORT GUIDELINES:\"" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"FLASHBACK SCN used for this export is: ${CURRENT_SCN}\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"${UNZIPMESSAGE}\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"BEFORE IMPORTING THE DUMPFILE IT'S RECOMMENDED TO RUN THIS SQL SCRIPT: ${SPOOLFILE1}\"" >>${EXPORTSCRIPT}
echo "echo \"It includes (USER|ROLES|GRANTED PRIVILEGES CREATION STATEMENTS), WHICH WILL NOT BE CREATED DURING THE IMPORT PROCESS.\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"AFTER IMPORTING THE DUMPFILE, RUN THIS SQL SCRIPT: ${SPOOLFILE2}\"" >>${EXPORTSCRIPT}
echo "echo \"It includes (Public Synonyms DDLs, Privileges granted to others, Hints for Triggers owned by others but depending on the exported schemas objects) + COMPILING INVALID OBJECTS, SUCH STUFF WILL NOT BE CARRIED OUT BY THE IMPORT PROCESS.\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "echo \"EXPORT DUMP FILE LOCATION: ${EXPORTDUMPOUTPUT}\"" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "export JOBSTATUS=\`grep \"successfully\\|stopped\\|completed\" ${LOGFILE}|tail -1\`" >>${EXPORTSCRIPT}
echo "export LOGFILE=${LOGFILE}" >>${EXPORTSCRIPT}
echo "export EMAILID=\"${EMAILANS}\"" >>${EXPORTSCRIPT}
echo "${SENDEMAIL}" >>${EXPORTSCRIPT}
chmod 740 ${EXPORTSCRIPT}
echo
echo "#!/bin/bash" > ${EXPORTSCRIPTRUNNER}
echo "nohup sh ${EXPORTSCRIPT}| tee ${LOGFILE} 2>&1 &" >>${EXPORTSCRIPTRUNNER}
chmod 740 ${EXPORTSCRIPTRUNNER}
echo -e "\033[32;5mFeel free to EXIT from this session as the EXPORT SCRIPT is running in the BACKGROUND.\033[0m"
source ${EXPORTSCRIPTRUNNER}
## Export METADATA ONLY: <using Legacy EXP because it's more reliable than EXPDP in exporting DDLs>
#echo
#echo "CREATING A FILE CONTAINS ALL CREATION [DDL] STATEMENT OF ALL USERS|OBJECTS ..."
#sleep 1
#${ORACLE_HOME}/bin/exp DBA_BUNDLEEXP7/"BUNdle_#-^${PASSHALF}" OWNER=${SCHEMA_NAME} ROWS=N STATISTICS=NONE FILE=${LOC1}/${SCHEMA_NAME}_${ORACLE_SID}_METADATA_${DUMPDATE}.dmp log=${LOC1}/${SCHEMA_NAME}_${ORACLE_SID}_METADATA_${DUMPDATE}.log
## Removing Extra Bad characters: [DUMP REFINING]
#/usr/bin/strings ${LOC1}/${SCHEMA_NAME}_${ORACLE_SID}_METADATA_${DUMPDATE}.dmp > ${LOC1}/${SCHEMA_NAME}_${ORACLE_SID}_METADATA_REFINED_${DUMPDATE}.trc
echo; exit ;;
*) echo "Enter a valid number:"
echo "====================="
echo "i.e."
echo "1 for expdp tool"
echo "2 for exp tool"
echo ;;
esac
done
break;;
3|"EXPORT TABLES"|"TABLES"|"tables"|"table") echo; echo "Entering EXPORT TABLE MODE ...";echo;sleep 1
# #####################
# EXPORT TABLE SECTION:
# #####################
echo
echo "Enter the TABLES you want to export, separating them by comma:"
echo "=============================================================="
echo "i.e. HR.EMPLOYEES,HR.DEPARTMENTS,SCOTT.BONUS"
echo ""
while read TABLESVAR
do
case ${TABLESVAR} in
"") echo; echo "Please mention the tables you want to export:"
echo "--------------------------------------------"
echo "i.e. HR.EMPLOYEES,HR.DEPARTMENTS,SCOTT.BONUS"
echo "";;
*) echo
# Convert User's input into UPPERCASE:
export TABLESVAR="$(echo ${TABLESVAR}| tr [:lower:] [:upper:])"
export TABLELIST="'$(sed s/,/\',\'/g <<<${TABLESVAR}| tr '[:lower:]' '[:upper:]')'"; break ;;
esac
done
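## e.g. entering "hr.employees,scott.bonus" above sets TABLESVAR=HR.EMPLOYEES,SCOTT.BONUS
## [written later as TABLES=... in the PARFILE] and TABLELIST to 'HR.EMPLOYEES','SCOTT.BONUS'
## for the synonym-rebuild queries below.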
echo "WHICH EXPORT UTILITY YOU WANT TO USE: [1) DATAPUMP [EXPDP]]"
echo "===================================="
echo "1) DATAPUMP [EXPDP] |Pros: Faster when import, Cloud/PARALLELISM compatible, can Exclude schema/tables |Cons: COMPRESSION requires license"
echo "2) LEGACY EXPORT [EXP] |Pros: COMPRESSION can happen on the fly without license |Cons: Slower when import, No Cloud/PARALLELISM compatibility"
while read EXP_TOOL
do
case $EXP_TOOL in
""|"1"|"DATAPUMP"|"datapump"|"DATAPUMP [EXPDP]"|"[EXPDP]"|"EXPDP"|"expdp")
if [[ ${CHK_PARALLELISM_OPTION} =~ ${INT} ]]
then
if [ ${CHK_PARALLELISM_OPTION} -eq 1 ]
then
echo
echo "Enter the PARALLEL DEGREE you want to perform the export with PARALLELISM? [If used, The final dump file will be divided into multiple files!]"
echo "========================================================================="
echo "[Current CPU Count on this Server is: ${PARALLEL_DEGREE}]"
echo "Enter a number bigger than 1 to utilize PARALLELISM or enter 0 to disable PARALLELISM"
echo ""
while read PARALLEL_ANS
do
# Check if the input is an integer:
if [[ -z ${PARALLEL_ANS} ]]; then
export PARALLEL_ANS=0
fi
if [[ ${PARALLEL_ANS} =~ ${INT} ]]
then
# Check if the input is greater than 1:
if [ "${PARALLEL_ANS}" -gt 1 ]
then
export PARALLEL="PARALLEL=${PARALLEL_ANS}"
export PARA="_%u"
echo -e "\033[32;5mPARALLELISM ENABLED | The final dump file will be divided into multiple files based on the degree of parallelism you used.\033[0m"
echo
else
echo "PARALLELISM DISABLED.";echo ""
fi
break
fi
done
else
echo;echo -e "\033[32;5mPARALLELISM option is not available in the current Database Edition.\033[0m"
fi
fi
# PARAMETER FILE CREATION:
export DUMPFILENAME="EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}${PARA}.dmp"
export LOGFILE="${LOC1}/EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}.log"
PARFILE=${LOC1}/EXPORT_TABLE_DB_${ORACLE_SID}.par
echo "# TABLE EXPORT PARAMETER FILE CREATED BY export_data.sh SCRIPT on [${DUMPDATE}]: [${ORACLE_SID}]" > ${PARFILE}
echo "DIRECTORY=EXPORT_FILES_DBA_BUNDLE" >> ${PARFILE}
echo "DUMPFILE=${DUMPFILENAME}" >> ${PARFILE}
echo "LOGFILE=EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}.log" >> ${PARFILE}
echo "${EXPORTSCN}" >> ${PARFILE}
echo "${REUSE_DUMP}" >> ${PARFILE}
echo "TABLES=${TABLESVAR}" >> ${PARFILE}
echo "${PARALLEL}" >> ${PARFILE}
echo
printf "`echo "Do you want to enable the COMPRESSION [Y|N] [N] [Do NOT answer with YES unless you already acquired the"` `echo -e "\033[33;5mAdvanced Compression License\033[0m"` `echo "]"`\n"
echo "====================================="
while read COMP_ANS
do case $COMP_ANS in
y|Y|yes|YES|Yes) echo;echo "COMPRESSION=ALL" >> ${PARFILE};echo -e "\033[32;5mCompression Enabled.\033[0m";echo; break ;;
""|n|N|no|NO|No) echo; echo "COMPRESSION DISABLED."; echo; break ;;
*) echo;echo "Please Enter a Valid Answer: [Y|N]"
echo "----------------------------";;
esac
done
echo
echo "Enter the CONTENT of data you want to Export:"
echo "============================================="
echo "1. DATA+METADATA [DEFAULT]"
echo "2. METADATA_ONLY [DDL]"
echo "3. DATA_ONLY"
echo ""
while read CONTENTVAR
do
case ${CONTENTVAR} in
""|"DATA+METADATA"|1) echo; echo "EXPORT MODE IS SET TO: [DATA + METADATA]"; echo; break ;;
"METADATA_ONLY"|"metadata_only"|"METADATA"|"metadata"|"DDL"|"ddl"|2) echo; export CONTENTVAR="CONTENT=METADATA_ONLY"; echo ${CONTENTVAR} >> ${PARFILE}; echo "EXPORT MODE IS SET TO: [METADATA_ONLY]"; echo; break ;;
"DATA_ONLY"|"data_only"|"DATA"|"data"|3) echo; export CONTENTVAR="CONTENT=DATA_ONLY"; echo ${CONTENTVAR} >> ${PARFILE}; echo "EXPORT MODE IS SET TO: [DATA_ONLY]"; echo; break ;;
*) echo; echo "Enter a correct option number between 1 to 3:"
echo "--------------------------------------------";;
esac
done
echo
echo "Enter the VERSION: [In case you want to import this dump later on a DB with LOWER version] | [Allowed value start from 9.2 and above] "
echo "================="
echo "e.g. If you will import this dump on a 10g DB then enter 10"
echo "For DEFAULT compatibility leave it BLANK."
echo ""
while read VERSION
do
case ${VERSION} in
""|"COMPATIBLE"|"compatible") echo; echo "DUMPFILE COMPATIBILITY version is set to the current DB compatibility level."; echo; break ;;
[0-9]) echo; echo "Wrong version number, this value cannot be set lower than 9.2!"
echo; echo "Enter a correct version higher than 9.2:"
echo "----------------------------------------";;
*) echo; echo "DUMPFILE COMPATIBILITY version is set to ${VERSION}."; VERSION="VERSION=${VERSION}"; echo ${VERSION} >> ${PARFILE}; echo; break ;;
esac
done
echo
echo "You are almost done!"; echo
sleep 1
echo "Please verify the export settings summary:"
echo "------------------------------------------"
cat ${PARFILE}
echo
sleep 1
echo "Shall we start the EXPORT job now? [[YES] | NO]"
echo "=================================="
while read STARTNOW
do
case ${STARTNOW} in
N|n|NO|no) echo; echo "SCRIPT TERMINATED! "; echo; exit;;
""|Y|y|YES|yes) echo; echo "STARTING THE IMPORT ..."; echo; break;;
*) echo "Please enter a valid answer: [YES|NO]";;
esac
done
echo "Creating the Exporter User DBA_BUNDLEEXP7 ..."
echo "Preparing the BEFORE and AFTER import script which will help you import the dump file later ..."
SPOOLFILE2=${LOC1}/AFTER_IMPORT_TABLE_${ORACLE_SID}_${DUMPDATE}.sql
VAL11=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
PROMPT CREATE USER DBA_BUNDLEEXP7 [EXPORTER USER] (WILL BE DROPPED AFTER THE EXPORT) ...
CREATE USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}";
ALTER USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}" ACCOUNT UNLOCK;
GRANT CREATE SESSION TO DBA_BUNDLEEXP7;
GRANT DBA TO DBA_BUNDLEEXP7;
-- The following privileges to workaround Bug 6392040:
GRANT EXECUTE ON SYS.DBMS_DEFER_IMPORT_INTERNAL TO DBA_BUNDLEEXP7;
GRANT EXECUTE ON SYS.DBMS_EXPORT_EXTENSION TO DBA_BUNDLEEXP7;
PROMPT CREATING DIRECTORY EXPORT_FILES_DBA_BUNDLE POINTING TO ${LOC1} ...
CREATE OR REPLACE DIRECTORY EXPORT_FILES_DBA_BUNDLE AS '${LOC1}';
PROMPT
PROMPT CREATING AFTER TABLE IMPORT SCRIPT ...
PROMPT
SET PAGES 0 TERMOUT OFF LINESIZE 157 ECHO OFF FEEDBACK OFF
SPOOL ${SPOOLFILE2}
SELECT 'CREATE SYNONYM '||OWNER||'.'||SYNONYM_NAME||' FOR '||TABLE_OWNER||'.'||TABLE_NAME||';' FROM DBA_SYNONYMS
WHERE TABLE_OWNER||'.'||TABLE_NAME in (${TABLELIST}) AND OWNER <> UPPER('PUBLIC')
UNION
SELECT 'CREATE PUBLIC SYNONYM '||SYNONYM_NAME||' FOR '||TABLE_OWNER||'.'||TABLE_NAME||';' FROM DBA_SYNONYMS
WHERE TABLE_OWNER||'.'||TABLE_NAME in (${TABLELIST}) AND OWNER=UPPER('PUBLIC');
SPOOL OFF
EOF
)
# Creation of the Export Script:
export EXPORTSCRIPT=${LOC1}/EXPORTSCRIPT.sh
export EXPORTSCRIPTRUNNER=${LOC1}/EXPORTSCRIPTRUNNER.sh
echo "# Export Script: [Created By DBA_BUNDLE]" > ${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"While the Export job is running, you can check the STATUS using:\"" >>${EXPORTSCRIPT}
echo "echo \"--------------------------------------------------------------- \"" >>${EXPORTSCRIPT}
echo "echo \"SELECT job_name, operation, job_mode, DEGREE, state FROM dba_datapump_jobs where OPERATION='EXPORT' and state='EXECUTING' and owner_name='DBA_BUNDLEEXP7';\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"Then you can ATTACH to the export job and control it using:\"" >>${EXPORTSCRIPT}
echo "echo \"---------------------------------------------------------- \"" >>${EXPORTSCRIPT}
echo "echo \"expdp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" ATTACH=<JOB_NAME_FROM_ABOVE_COMMAND>\"" >>${EXPORTSCRIPT}
echo "echo \"i.e.\"" >>${EXPORTSCRIPT}
echo "echo \"expdp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" ATTACH=SYS_EXPORT_TABLE_01\"" >>${EXPORTSCRIPT}
echo "echo \"To Show the STATUS:....... STATUS\"" >>${EXPORTSCRIPT}
echo "echo \"To KILL the export:....... KILL_JOB\"" >>${EXPORTSCRIPT}
echo "echo \"To PAUSE the export:...... STOP_JOB\"" >>${EXPORTSCRIPT}
echo "echo \"To RESUME a paused export: START_JOB\"" >>${EXPORTSCRIPT}
echo "export ORACLE_SID=${ORACLE_SID}" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running The Export Job Now ...'" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/expdp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" PARFILE=${PARFILE}" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running Post Export Steps ...'" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF" >>${EXPORTSCRIPT}
echo "PROMPT" >>${EXPORTSCRIPT}
echo "PROMPT DROPPING THE EXPORTER USER DBA_BUNDLEEXP7 ..." >>${EXPORTSCRIPT}
echo "DROP USER DBA_BUNDLEEXP7 CASCADE;" >>${EXPORTSCRIPT}
echo "EOF" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"IMPORT GUIDELINES:\"" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"FLASHBACK SCN used for this export is: ${CURRENT_SCN}\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"AFTER IMPORTING THE DUMPFILE, RUN THIS SQL SCRIPT: ${SPOOLFILE2}\"" >>${EXPORTSCRIPT}
echo "echo \"IT INCLUDES (PRIVATE & PUBLIC SYNONYMS DDLS) WHICH WILL NOT BE HANDELED BY THE IMPORT PROCESS.\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "echo \"EXPORT DUMP FILE LOCATION: ${LOC1}/${DUMPFILENAME}\"" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "export JOBSTATUS=\`grep \"successfully\\|stopped\\|completed\" ${LOGFILE}|tail -1\`" >>${EXPORTSCRIPT}
echo "export LOGFILE=${LOGFILE}" >>${EXPORTSCRIPT}
echo "export EMAILID=\"${EMAILANS}\"" >>${EXPORTSCRIPT}
echo "${SENDEMAIL}" >>${EXPORTSCRIPT}
chmod 740 ${EXPORTSCRIPT}
echo
echo "#!/bin/bash" > ${EXPORTSCRIPTRUNNER}
echo "nohup sh ${EXPORTSCRIPT}| tee ${LOGFILE} 2>&1 &" >>${EXPORTSCRIPTRUNNER}
chmod 740 ${EXPORTSCRIPTRUNNER}
echo -e "\033[32;5mFeel free to EXIT from this session as the EXPORT SCRIPT is running in the BACKGROUND.\033[0m"
source ${EXPORTSCRIPTRUNNER}
echo; exit ;;
"2"|"LEGACY EXPORT"|"LEGACY"|"EXPORT"|"LEGACY EXPORT [EXP]"|"EXP"|"[EXP]"|"exp"|"legacy export"|"legacy"|"export")
export EXPORTDUMP="${LOC1}/EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}.dmp"
export LOGFILE="${LOC1}/EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}.log"
echo
printf "`echo "Do you want to enable the COMPRESSION [Y|N] [N] [COMPRESSION will happen on the fly using mknod |"` `echo -e "\033[33;5mNo License required\033[0m"` `echo "]"`\n"
echo "====================================="
while read COMP_ANS
do
case $COMP_ANS in
y|Y|yes|YES|Yes) echo;export EXPORTDUMP="${LOC1}/EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}_pipe.dmp"
export MKNOD="rm -f ${EXPORTDUMP}; mknod ${EXPORTDUMP} p"
export ZIP="nohup bzip2 -fz < ${EXPORTDUMP} > ${LOC1}/EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}.dmp.bz2 &"
export EXPORTDUMPOUTPUT="${LOC1}/EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}.dmp.bz2"
export REMOVEMKDON="rm -f ${EXPORTDUMP}"
export UNZIPMESSAGE="First DE-COMPRESS the file using this command: bunzip2 -f ${EXPORTDUMPOUTPUT}"
echo -e "\033[32;5mCompression Enabled.\033[0m";echo; break ;;
""|n|N|no|NO|No) echo;export MKNOD=""
export ZIP=""
export EXPORTDUMP="${LOC1}/EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}.dmp"
export EXPORTDUMPOUTPUT="${LOC1}/EXPORT_TABLE_${ORACLE_SID}_${DUMPDATE}.dmp";break ;;
*) echo;echo "Please Enter a Valid Answer [Y|N]"
echo "---------------------------------";;
esac
done
SPOOLFILE2=${LOC1}/AFTER_IMPORT_TABLE_DB_${ORACLE_SID}_${DUMPDATE}.sql
echo "Creating the Exporter User DBA_BUNDLEEXP7 ..."
echo "Preparing the BEFORE and AFTER import script which will help you import the dump file later ..."
VAL11=$(${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF
PROMPT CREATE USER DBA_BUNDLEEXP7 [EXPORTER USER] (WILL BE DROPPED AFTER THE EXPORT) ...
CREATE USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}";
ALTER USER DBA_BUNDLEEXP7 IDENTIFIED BY "BUNdle_#-^${PASSHALF}" ACCOUNT UNLOCK;
GRANT CREATE SESSION TO DBA_BUNDLEEXP7;
GRANT DBA TO DBA_BUNDLEEXP7;
-- The following privileges to workaround Bug 6392040:
GRANT EXECUTE ON SYS.DBMS_DEFER_IMPORT_INTERNAL TO DBA_BUNDLEEXP7;
GRANT EXECUTE ON SYS.DBMS_EXPORT_EXTENSION TO DBA_BUNDLEEXP7;
PROMPT CREATING DIRECTORY EXPORT_FILES_DBA_BUNDLE POINTING TO ${LOC1} ...
CREATE OR REPLACE DIRECTORY EXPORT_FILES_DBA_BUNDLE AS '${LOC1}';
PROMPT
PROMPT CREATING AFTER TABLE IMPORT SCRIPT ...
PROMPT
SET PAGES 0 TERMOUT OFF LINESIZE 157 ECHO OFF FEEDBACK OFF
SPOOL ${SPOOLFILE2}
SELECT 'CREATE SYNONYM '||OWNER||'.'||SYNONYM_NAME||' FOR '||TABLE_OWNER||'.'||TABLE_NAME||';' FROM DBA_SYNONYMS
WHERE TABLE_OWNER||'.'||TABLE_NAME in (${TABLELIST}) AND OWNER <> UPPER('PUBLIC')
UNION
SELECT 'CREATE PUBLIC SYNONYM '||SYNONYM_NAME||' FOR '||TABLE_OWNER||'.'||TABLE_NAME||';' FROM DBA_SYNONYMS
WHERE TABLE_OWNER||'.'||TABLE_NAME in (${TABLELIST}) AND OWNER=UPPER('PUBLIC');
SPOOL OFF
EOF
)
# Creation of the Post Export Script:
export EXPORTSCRIPT=${LOC1}/EXPORTSCRIPT.sh
export EXPORTSCRIPTRUNNER=${LOC1}/EXPORTSCRIPTRUNNER.sh
echo "# Export Script: [Created By DBA_BUNDLE]" > ${EXPORTSCRIPT}
echo "export ORACLE_SID=${ORACLE_SID}" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running The Export Job Now ...'" >>${EXPORTSCRIPT}
echo "${MKNOD}" >>${EXPORTSCRIPT}
echo "sleep 1" >>${EXPORTSCRIPT}
echo "${ZIP}" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/exp DBA_BUNDLEEXP7/\"BUNdle_#-^${PASSHALF}\" TABLES=${TABLESVAR} DIRECT=y CONSISTENT=y STATISTICS=NONE FEEDBACK=100000 ${EXPORTSCN} RESUMABLE=y RESUMABLE_NAME=DBA_BUNDLE_EXPORT RESUMABLE_TIMEOUT=86400 FILE=${EXPORTDUMP} log=${LOGFILE}" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo 'Running Post Export Steps ...'" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "${ORACLE_HOME}/bin/sqlplus -s '/ as sysdba' << EOF" >>${EXPORTSCRIPT}
echo "PROMPT" >>${EXPORTSCRIPT}
echo "PROMPT DROPPING THE EXPORTER USER DBA_BUNDLEEXP7 ..." >>${EXPORTSCRIPT}
echo "DROP USER DBA_BUNDLEEXP7 CASCADE;" >>${EXPORTSCRIPT}
echo "EOF" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "sleep 3" >>${EXPORTSCRIPT}
echo "${REMOVEMKDON}" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"IMPORT GUIDELINES:\"" >>${EXPORTSCRIPT}
echo "echo \"*****************\"" >>${EXPORTSCRIPT}
echo "echo \"FLASHBACK SCN used for this export is: ${CURRENT_SCN}\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"${UNZIPMESSAGE}\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"AFTER IMPORTING THE DUMPFILE, RUN THIS SQL SCRIPT: ${SPOOLFILE2}\"" >>${EXPORTSCRIPT}
echo "echo \"IT INCLUDES (PRIVATE & PUBLIC SYNONYMS DDLS) WHICH WILL NOT BE HANDELED BY THE IMPORT PROCESS.\"" >>${EXPORTSCRIPT}
echo "echo ''" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "echo \"EXPORT DUMP FILE LOCATION: ${EXPORTDUMPOUTPUT}\"" >>${EXPORTSCRIPT}
echo "echo \"**************************\"" >>${EXPORTSCRIPT}
echo "export JOBSTATUS=\`grep \"successfully\\|stopped\\|completed\" ${LOGFILE}|tail -1\`" >>${EXPORTSCRIPT}
echo "export LOGFILE=${LOGFILE}" >>${EXPORTSCRIPT}
echo "export EMAILID=\"${EMAILANS}\"" >>${EXPORTSCRIPT}
echo "${SENDEMAIL}" >>${EXPORTSCRIPT}
chmod 740 ${EXPORTSCRIPT}
echo
echo "#!/bin/bash" > ${EXPORTSCRIPTRUNNER}
echo "nohup sh ${EXPORTSCRIPT}| tee ${LOGFILE} 2>&1 &" >>${EXPORTSCRIPTRUNNER}
chmod 740 ${EXPORTSCRIPTRUNNER}
echo -e "\033[32;5mFeel free to EXIT from this session as the EXPORT SCRIPT is running in the BACKGROUND.\033[0m"
source ${EXPORTSCRIPTRUNNER}
echo; exit ;;
*) echo "Enter a valid number:"
echo "====================="
echo "i.e."
echo "1 for expdp tool"
echo "2 for exp tool"
echo ;;
esac
done
break;;
*) echo "Enter a NUMBER between 1 to 3 boss:"
echo "==================================" ;;
esac
done
# #############
# END OF SCRIPT
# #############
# DISCLAIMER: THIS SCRIPT IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT WITHOUT ANY WARRANTY. IT IS PROVIDED "AS IS".
# Do not live under a rock :-) Every month a new version of DBA_BUNDLE gets released; download it by visiting:
# http://dba-tips.blogspot.com/2014/02/oracle-database-administration-scripts.html
# REPORT BUGs to: mahmmoudadel@hotmail.com