Advanced Product Service
Oracle Database 10g : Administration Workshop II

Creation Date : April 7, 2009
Last Updated  : April 11, 2009
Version       : 1.0
Lab Environment Setup

cd $HOME/mylabs

cat env.sh
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
export ORACLE_SID=orcl
export PATH=$ORACLE_HOME/bin:$ORACLE_HOME/jdk/bin:$PATH
export PS1="[\$ORACLE_SID:\$PWD] "
export NLS_DATE_FORMAT="yyyy-mm-dd hh24:mi:ss"
export NLS_LANG=AMERICAN_AMERICA.WE8ISO8859P1
alias orcl='export ORACLE_SID=orcl'
alias catdb='export ORACLE_SID=catdb'
alias asm='export ORACLE_SID=+ASM'
alias sql='sqlplus / as sysdba'

cat env.sh >> $HOME/.bash_profile
source $HOME/.bash_profile

cat login.sql
define _editor=vi
set linesize 130
set pagesize 50
set serveroutput on
set sqlprompt "_USER'@'_CONNECT_IDENTIFIER>"

cat login.sql >> $ORACLE_HOME/sqlplus/admin/glogin.sql

cat startup.sh
lsnrctl start
sqlplus / as sysdba <<EOF
startup
exit
EOF
emctl start dbconsole
isqlplusctl start

$HOME/mylabs/startup.sh
$HOME/mylabs/unlock.sh
Chapter 2. Configuring Recovery Manager

# Back up the database in its current state so that the labs for the whole course can be reset later.
$HOME/labs/lab_02_copy.sh

# Check the current archivelog mode, then switch the database to ARCHIVELOG mode.
sqlplus / as sysdba

> archive log list
Database log mode              No Archive Mode
Automatic archival             Disabled
Archive destination            USE_DB_RECOVERY_FILE_DEST
Oldest online log sequence     2
Current log sequence           4

> show parameter spfile
NAME      TYPE     VALUE
--------- -------- ------------------------------------------------------
spfile    string   /u01/app/oracle/product/10.2.0/db_1/dbs/spfileorcl.ora

> show parameter db_recovery_file_dest
NAME                         TYPE        VALUE
---------------------------- ----------- -----------------------------------
db_recovery_file_dest        string      /u01/app/oracle/flash_recovery_area
db_recovery_file_dest_size   big integer 2G

> alter system set db_recovery_file_dest_size = 4G ;
> shutdown immediate
> startup mount
> alter database archivelog ;
> archive log list
Database log mode              Archive Mode
Automatic archival             Enabled
Archive destination            USE_DB_RECOVERY_FILE_DEST
Oldest online log sequence     2
Next log sequence to archive   4
Current log sequence           4

> alter database open ;
> exit
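# Optional check (not part of the original lab script): confirm that archiving actually works after the
# switch. The log switch and the V$ARCHIVED_LOG query below are standard commands, shown here only as a
# hedged verification step.
sqlplus / as sysdba
> alter system switch logfile ;
> select sequence#, name, completion_time from v$archived_log order by sequence# ;
> exit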
# Create the catalog DB, then create and register the RMAN repository.
export ORACLE_SID=catdb

cat $HOME/mylabs/cre_catdb.sh
orapwd file=$ORACLE_HOME/dbs/orapwcatdb password=oracle
sqlplus / as sysdba <<EOF
create spfile from pfile='$HOME/mylabs/initcatdb.ora' ;
startup nomount
create database catdb
  user sys identified by oracle
  user system identified by oracle
  character set AL32UTF8
  undo tablespace undotbs
  default temporary tablespace temp ;
@$ORACLE_HOME/rdbms/admin/catalog.sql
@$ORACLE_HOME/rdbms/admin/catproc.sql
conn system/oracle
@$ORACLE_HOME/sqlplus/admin/pupbld.sql
exit
EOF

$HOME/mylabs/cre_catdb.sh        -- takes roughly 10 to 15 minutes

sqlplus / as sysdba
> create tablespace rc_tbs ;
> create user rman identified by rman default tablespace rc_tbs ;
> grant resource, connect, recovery_catalog_owner to rman ;
> exit

cp $HOME/mylabs/listener.ora $ORACLE_HOME/network/admin/
cp $HOME/mylabs/tnsnames.ora $ORACLE_HOME/network/admin/
lsnrctl reload

export ORACLE_SID=orcl
rman target / catalog=rman/rman@catdb
> create catalog ;
> register database ;
# Register the catalog DB in EM
From the DB Home page, go to Maintenance and, in the Backup/Recovery Settings area, select
Recovery Catalog Settings, then click Add Recovery Catalog.
Fill in each box with the required information as shown in the figure, then click Next.
Verify the information and click Finish; on the next screen select Use Recovery Catalog and click OK.
# Once you have reached this point, continue from exercise 2 on page B-2 of the course material.
# When the Chapter 2 labs are finished, carry out the following steps for additional practice and to
# verify the results. (Parts of the Chapter 3 labs are also done in advance here.)

export ORACLE_SID=orcl
rman target / catalog=rman/rman@catdb

# For this lab, change the number of backups that must be retained to 1.
> configure retention policy to redundancy 1 ;

# Check which files are backup targets and which files still need a backup.
> report schema ;
> report need backup ;
RMAN retention policy will be applied to the command
RMAN retention policy is set to redundancy 1
Report of files with less than 1 redundant backups
File #bkps Name
---- ----- -----------------------------------------------------
1    0     /u01/app/oracle/oradata/orcl/system01.dbf
2    0     /u01/app/oracle/oradata/orcl/undotbs01.dbf
3    0     /u01/app/oracle/oradata/orcl/sysaux01.dbf
4    0     /u01/app/oracle/oradata/orcl/users01.dbf
5    0     /u01/app/oracle/oradata/orcl/example01.dbf
file 6 is excluded from whole database backup

# Check the backups taken so far.
> list backup of database ;      -- should return nothing

# Take a compressed level 0 backup to serve as the base for incremental backups.
> backup as compressed backupset incremental level 0 database ;

# Back up the files belonging to specific tablespaces.
> backup as backupset tablespace users, example ;
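# Not in the original handout, but if you want to confirm that the retention policy change took effect,
# RMAN's SHOW command prints the persistent configuration:
> show retention policy ;
> show all ;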
# Allocate a channel manually and back up only a specific datafile.
> run {
    allocate channel c1 device type disk;
    backup as compressed backupset datafile 1 ;
    sql "alter system archive log current" ;
  }

# Check the backup information.
> list backup of database ;

# Identify backups that are obsolete under the retention policy.
> report obsolete ;

# Manually resynchronize the catalog DB.
> resync catalog ;
> exit

# Check the current usage of the Flash Recovery Area.
sqlplus / as sysdba
> select * from v$flash_recovery_area_usage ;
FILE_TYPE    PCT_SPACE_USED PCT_SPACE_RECLAIMABLE NUMBER_OF_FILES
------------ -------------- --------------------- ---------------
CONTROLFILE               0                     0               0
ONLINELOG                 0                     0               0
ARCHIVELOG              .06                   .03               2
BACKUPPIECE             6.6                   .33               6
IMAGECOPY                 0                     0               0
FLASHBACKLOG              0                     0               0

# This can also be checked in EM (All Metrics - Recovery Area).
Chapter 3. Using Recovery Manager

# At this point the labs are complete through page B-10 of the course material. Continue from exercise 6
# on page B-11, then carry on with the steps below.

# Enable block change tracking.
sqlplus / as sysdba
> alter database enable block change tracking
  using file '/home/oracle/labs/rman_change_track.f' ;
> column filename format a50
> select * from v$block_change_tracking ;
> exit

# Take an incremental backup and check the result.
rman target / catalog rman/rman@catdb
> backup as compressed backupset incremental level 1 database ;
> list backup of database ;

# Identify backups that are obsolete under the retention policy.
> report obsolete ;

# Delete the obsolete backups.
> delete obsolete ;

# Verify that the unneeded backups are gone.
> list backup of database ;
> exit
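# Optional check (not part of the original handout): confirm that the level 1 backup actually read the
# block change tracking file. V$BACKUP_DATAFILE records this per datafile backup in the
# USED_CHANGE_TRACKING column.
sqlplus / as sysdba
> select file#, incremental_level, used_change_tracking, completion_time
  from v$backup_datafile
  order by completion_time ;
> exit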
Chapter 5. Database Recovery

# Create a TEST table in the USERS tablespace and load some arbitrary data (log switches are also performed).
sqlplus system/oracle@orcl
@$HOME/mylabs/workload.sql

rman target / catalog rman/rman@catdb
> backup as compressed backupset incremental level 1 database plus archivelog ;

# Check the backup information.
> list backup of database ;
> list copy ;
> exit

# Delete the users01.dbf file at the OS level with the rm command.
rm /u01/app/oracle/oradata/orcl/users01.dbf

# Query the test table as the system user. Do you get a result?
sqlplus system/oracle
> alter system flush buffer_cache ;
> select count(*) from test ;
ERROR at line 1:
ORA-01116: error in opening database file 4
ORA-01110: data file 4: '/u01/app/oracle/oradata/orcl/users01.dbf'
ORA-27041: unable to open file
Linux Error: 2: No such file or directory
Additional information: 3

# Perform the recovery with RMAN.
> host rman target / catalog rman/rman@catdb
> list backup of tablespace users ;
> run {
    sql "alter tablespace users offline immediate" ;
    restore tablespace users ;
    recover tablespace users delete archivelog ;
    sql "alter tablespace users online" ;
  }

# Take a fresh backup.
> backup as compressed backupset database ;
> delete obsolete ;
> exit

# Check the result back in the SQL*Plus session.
> select count(*) from test ;
> drop table test purge ;

# For the remaining exercises, refer to the course material.
Chapter 6. Flashback

# Check the current flashback mode (do this before the exercises in the course material).
sqlplus / as sysdba
> select flashback_on from v$database ;
FLASHBACK_ON
-------------------------
NO

# Create a guaranteed restore point. Is it possible?
> create restore point first_point guarantee flashback database ;
ERROR at line 1:
ORA-38784: Cannot create restore point 'FIRST_POINT'.
ORA-38787: Creating the first guaranteed restore point requires mount mode when flashback database is off.
  - When flashback logging is disabled, the first guaranteed restore point can only be created in MOUNT mode.

> shutdown immediate
> startup mount
> create restore point first_point guarantee flashback database ;
> select current_scn from v$database ;       -- note the SCN
> select flashback_on from v$database ;
FLASHBACK_ON
------------------
RESTORE POINT ONLY

> alter database open ;
> create table test (id number) ;
> insert into test values(1) ;
> commit ;

# Create another restore point before dropping the table.
> create restore point before_drop guarantee flashback database ;
> drop table test purge ;
> select * from test ;
ERROR at line 1:
ORA-00942: table or view does not exist

# Flash back the database using the guaranteed restore point.
> shutdown immediate
> startup mount
> select oldest_flashback_scn, oldest_flashback_time from v$flashback_database_log ;
OLDEST_FLASHBACK_SCN OLDEST_FLASHBACK_TIME
-------------------- ---------------------
             1328129 2009-04-10 18:02:00

> flashback database to restore point before_drop ;
> alter database open resetlogs ;
> select * from test ;
> drop table test purge ;
> drop restore point before_drop ;
> drop restore point first_point ;
> select flashback_on from v$database ;
FLASHBACK_ON
-------------------------
NO

# Using the Recycle Bin
# Create the user and the objects needed for this lab.
> create user user1 identified by oracle ;
> grant dba to user1 ;
> conn user1/oracle
> create table emp as select * from scott.emp ;
> alter table emp add primary key(empno) ;
> create index deptno_idx on emp(deptno) ;
> create trigger emp_trig
  after insert on emp
  begin
    null;
  end;
  /

# user1 now owns several objects. Check the related information.
> column object_name format a30
> column object_type format a30
> select object_name, object_type from user_objects ;
OBJECT_NAME                    OBJECT_TYPE
------------------------------ ------------------------------
EMP                            TABLE
DEPTNO_IDX                     INDEX
SYS_C005398                    INDEX
EMP_TRIG                       TRIGGER

> select constraint_name from user_constraints ;
CONSTRAINT_NAME
------------------------------
SYS_C005398

# The user drops the table by mistake.
> drop table emp ;

# The objects associated with emp are not removed immediately; they are renamed and kept in the
# recycle bin. Check each of them.
> select object_name, object_type from user_objects ;
OBJECT_NAME                    OBJECT_TYPE
------------------------------ ------------------------------
BIN$ZzgT6KzAEXPgQKjAZKsc4Q==$0 TRIGGER
BIN$ZzgT6KzBEXPgQKjAZKsc4Q==$0 TABLE
BIN$ZzgT6Ky/EXPgQKjAZKsc4Q==$0 INDEX
BIN$ZzgT6Ky+EXPgQKjAZKsc4Q==$0 INDEX

> select constraint_name from user_constraints ;
CONSTRAINT_NAME
------------------------------
BIN$ZzgT6Ky9EXPgQKjAZKsc4Q==$0
> show recyclebin
ORIGINAL NAME    RECYCLEBIN NAME                OBJECT TYPE  DROP TIME
---------------- ------------------------------ ------------ -------------------
EMP              BIN$ZzgT6KzBEXPgQKjAZKsc4Q==$0 TABLE        2009-04-10:18:56:02

# Query the dropped table using the recycle bin name found above. Do you get results?
> select * from "BIN$ZzgT6KzBEXPgQKjAZKsc4Q==$0" ;

# Now create a table with the same name. Is it possible?
> create table emp as select * from scott.emp ;

# Since the dropped table cannot be restored under its original name, flash it back under a new name.
> flashback table emp to before drop rename to ori_emp ;
> select * from ori_emp ;

# Are the other objects, such as the indexes, still usable?
> set autotrace on explain
> select * from ori_emp where empno = 7788 ;
--------------------------------------------------------------------------------------------------------------
| Id | Operation                   | Name                           | Rows | Bytes | Cost (%CPU)| Time     |
--------------------------------------------------------------------------------------------------------------
|  0 | SELECT STATEMENT            |                                |    1 |    87 |      1  (0)| 00:00:01 |
|  1 |  TABLE ACCESS BY INDEX ROWID| ORI_EMP                        |    1 |    87 |      1  (0)| 00:00:01 |
|* 2 |   INDEX UNIQUE SCAN         | BIN$ZzgT6Ky/EXPgQKjAZKsc4Q==$0 |    1 |       |      0  (0)| 00:00:01 |
--------------------------------------------------------------------------------------------------------------
> set autotrace off

# Once you have checked the results, clean up the lab.
> conn / as sysdba
> drop user user1 cascade ;
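# Worth knowing, though not part of the original steps: dropped objects kept in the recycle bin still
# count against their tablespace quota. The standard PURGE commands below remove them permanently
# (shown here only as a hedged reference; run them only if you really want to discard the objects).
> purge table emp ;          -- purge one dropped table from your own recycle bin (if it is still there)
> purge recyclebin ;         -- purge everything in the current user's recycle bin
> purge dba_recyclebin ;     -- as SYSDBA: purge every user's recycle bin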
Chapter 9. Automatic Performance Management

# Create a snapshot manually and check the result.
> host cat $HOME/mylabs/dba_hist_snapshot.sql
select snap_id,
       to_char(startup_time, 'YYYY/MM/DD HH24:MI:SS') start_time,
       to_char(begin_interval_time,'yyyy/mm/dd HH24:MI:SS') begin_time
from   dba_hist_snapshot
order by snap_id;

> @$HOME/mylabs/dba_hist_snapshot.sql
   SNAP_ID START_TIME          BEGIN_TIME
---------- ------------------- -------------------
        15 2009/04/10 18:44:17 2009/04/10 19:00:34
        16 2009/04/10 18:44:17 2009/04/10 20:00:38
        17 2009/04/10 18:44:17 2009/04/10 21:00:37

> exec dbms_workload_repository.create_snapshot
> @$HOME/mylabs/dba_hist_snapshot.sql
   SNAP_ID START_TIME          BEGIN_TIME
---------- ------------------- -------------------
        15 2009/04/10 18:44:17 2009/04/10 19:00:34
        16 2009/04/10 18:44:17 2009/04/10 20:00:38
        17 2009/04/10 18:44:17 2009/04/10 21:00:37
        18 2009/04/10 18:44:17 2009/04/10 22:00:42

# Run a statement that puts some load on the system, then check the result.
> host cat $HOME/mylabs/dy_sql.sql
declare
  v_cnt   number ;
  v_start number default dbms_utility.get_time;
begin
  for i in 1..100000 loop
    execute immediate 'select count(*) from dual where dummy=to_char(' || i || ')' into v_cnt ;
  end loop ;
  dbms_output.put_line(round((dbms_utility.get_time-v_start)/100, 2) || ' seconds');
end ;
/

> @$HOME/mylabs/dy_sql.sql
# Create another snapshot, then analyze the results.
> exec dbms_workload_repository.create_snapshot

# Generate an AWR report (enter appropriate values at each prompt).
# The same results can also be viewed in EM, as in the course material.
> @?/rdbms/admin/awrrpt.sql
Specify the Report Type
~~~~~~~~~~~~~~~~~~~~~~~
Would you like an HTML report, or a plain text report?
Enter 'html' for an HTML report, or 'text' for plain text
Defaults to 'html'
Enter value for report_type: text
Enter value for num_days:
Specify the Begin and End Snapshot Ids
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Enter value for begin_snap: 14
Enter value for end_snap: 19
Specify the Report Name
~~~~~~~~~~~~~~~~~~~~~~~
The default report file name is awrrpt_1_14_19.txt. To use this name,
press <return> to continue, otherwise enter an alternative.
Enter value for report_name: awrrpt.txt

> host vi awrrpt.txt

# Generate an ADDM report.
> @?/rdbms/admin/addmrpt.sql
Specify the Begin and End Snapshot Ids
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Enter value for begin_snap: 14
Enter value for end_snap: 19
Specify the Report Name
~~~~~~~~~~~~~~~~~~~~~~~
The default report file name is addmrpt_1_14_19.txt. To use this name,
press <return> to continue, otherwise enter an alternative.
Enter value for report_name: addmrpt.txt

> host vi addmrpt.txt
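# The handout relies on the default hourly AWR snapshots. If you want a shorter interval for the lab,
# DBMS_WORKLOAD_REPOSITORY.MODIFY_SNAPSHOT_SETTINGS takes retention and interval in minutes. This is a
# hedged example only; the values 10080 (7 days) and 30 minutes are illustrative, not part of the lab.
> exec dbms_workload_repository.modify_snapshot_settings(retention => 10080, interval => 30)
> select snap_interval, retention from dba_hist_wr_control ;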
# Check the results in EM:
  From HOME - Advisor Central, select the task you just ran and click View Result.
  Review each of the Recommendations and select Implement where appropriate.

# Examining buffer busy waits
  When many sessions insert into the same table at the same time, the segment grows rapidly, and this
  causes a variety of performance problems. One of them is the "buffer busy waits" wait event caused by
  buffer lock contention.

/* Segment Space Management : Manual */
> conn / as sysdba
> startup force
> conn system/oracle
> create tablespace tbs_m
  datafile '$ORACLE_HOME/dbs/tbs_m.dbf' size 50m autoextend on
  segment space management manual ;
> create table test_m(id char(1000)) tablespace tbs_m ;
> create or replace procedure do_insert is
  begin
    for i in 1..10000 loop
      insert into test_m values(i);
      commit ;
    end loop ;
  end ;
  /

# Check the buffer busy waits statistics.
> host cat $HOME/mylabs/system_event.sql
select event, total_waits, time_waited
from   v$system_event
where  event = 'buffer busy waits'
or     event like 'enq: HW%' ;

> @$HOME/mylabs/system_event.sql
no rows selected

> host cat $HOME/mylabs/session_event.sql
select event, total_waits, time_waited
from   v$session_event
where  sid = (select sid from v$mystat where rownum = 1 )
and    (event='buffer busy waits' or event like 'enq: HW%')
order by 3 desc ;

> @$HOME/mylabs/session_event.sql
no rows selected
# Create concurrent jobs, then check which jobs are running.
> host cat $HOME/mylabs/submit_job.sql
var job_no number ;
begin
  for idx in 1..10 loop
    dbms_job.submit(:job_no,'do_insert;',sysdate+1/86400,'sysdate+1/86400') ;
    commit ;
  end loop ;
end;
/

> @$HOME/mylabs/submit_job.sql

> host cat $HOME/mylabs/dba_jobs.sql
select job, log_user, next_date, next_sec, broken, what
from   dba_jobs
where  log_user = USER
order by 1;

> @$HOME/mylabs/dba_jobs.sql
> @$HOME/mylabs/dba_jobs_running
  Check that 10 jobs are running; if not, wait a little and rerun the query until 10 rows appear,
  then continue.
> /

# Run do_insert so that the wait events can also be observed in the current session.
> exec do_insert

# Remove the jobs.
> host cat $HOME/mylabs/remove_job.sql
begin
  for x in ( select job from dba_jobs where log_user = USER ) loop
    dbms_job.remove(x.job) ;
  end loop ;
end;
/
> @$HOME/mylabs/remove_job.sql
> @$HOME/mylabs/dba_jobs.sql

# Check the results.
> @$HOME/mylabs/system_event.sql
EVENT                      TOTAL_WAITS TIME_WAITED
-------------------------- ----------- -----------
buffer busy waits              217,794     145,853
enq: HW - contention            35,926      56,318

> @$HOME/mylabs/session_event.sql
EVENT                      TOTAL_WAITS TIME_WAITED
-------------------------- ----------- -----------
buffer busy waits               11,079       7,921
enq: HW - contention             1,924       4,205

# Reset the environment, then repeat the test with Segment Space Management AUTO.
> drop procedure do_insert ;
> drop table test_m purge ;
> drop tablespace tbs_m including contents and datafiles ;
> conn / as sysdba
> startup force

/* Segment Space Management : Auto */
> conn system/oracle
> create tablespace tbs_a
  datafile '$ORACLE_HOME/dbs/tbs_a.dbf' size 50m autoextend on
  segment space management auto ;
> create table test_a(id char(1000)) tablespace tbs_a ;
> create or replace procedure do_insert is
  begin
    for i in 1..10000 loop
      insert into test_a values(i);
      commit ;
    end loop ;
  end ;
  /
# Check the current event statistics.
> @$HOME/mylabs/system_event.sql
no rows selected
> @$HOME/mylabs/session_event.sql
no rows selected

# Create the concurrent jobs, then check which jobs are running.
> @$HOME/mylabs/submit_job.sql
> @$HOME/mylabs/dba_jobs.sql
> @$HOME/mylabs/dba_jobs_running

# Run do_insert so that the wait events can also be observed in the current session.
> exec do_insert

# Remove the jobs.
> @$HOME/mylabs/remove_job.sql
> @$HOME/mylabs/dba_jobs.sql

# Check the results.
> @$HOME/mylabs/system_event.sql
EVENT                      TOTAL_WAITS TIME_WAITED
-------------------------- ----------- -----------
buffer busy waits               15,658      13,375
enq: HW - contention               849      18,779

> @$HOME/mylabs/session_event.sql
EVENT                      TOTAL_WAITS TIME_WAITED
-------------------------- ----------- -----------
enq: HW - contention                65       1,780
buffer busy waits                  659         574

# Reset the environment, then repeat the test with a partitioned table.
> drop procedure do_insert ;
> drop table test_a purge ;
> drop tablespace tbs_a including contents and datafiles ;
> conn / as sysdba
> startup force
/* Segment Space Management : Auto with Partitioned Table */
> conn system/oracle
> create tablespace tbs_p
  datafile '$ORACLE_HOME/dbs/tbs_p.dbf' size 50m autoextend on
  segment space management auto ;
> create table test_p(id char(1000)) tablespace tbs_p
  partition by hash (id) partitions 8 ;
> create or replace procedure do_insert is
  begin
    for i in 1..10000 loop
      insert into test_p values(i);
      commit ;
    end loop ;
  end ;
  /

# Create the concurrent jobs, then check which jobs are running.
> @$HOME/mylabs/submit_job.sql
> @$HOME/mylabs/dba_jobs.sql
> @$HOME/mylabs/dba_jobs_running

# Run do_insert so that the wait events can also be observed in the current session.
> exec do_insert

# Remove the jobs.
> @$HOME/mylabs/remove_job.sql
> @$HOME/mylabs/dba_jobs.sql

# Check the results.
> @$HOME/mylabs/system_event.sql
EVENT                      TOTAL_WAITS TIME_WAITED
-------------------------- ----------- -----------
buffer busy waits                3,483       7,325
enq: HW - contention             2,334      49,254
> @$HOME/mylabs/session_event.sql
EVENT                      TOTAL_WAITS TIME_WAITED
-------------------------- ----------- -----------
enq: HW - contention               212       4,601
buffer busy waits                  238         591

> drop procedure do_insert ;
> drop table test_p purge ;
> drop tablespace tbs_p including contents and datafiles ;
> exit

# Using the SQL Access Advisor
sqlplus / as sysdba
> grant advisor, query rewrite to sh ;

# Suppose the following statements were identified as doing heavy work on the system; examine the
# performance issues of each one.
> conn sh/sh
> set autotrace traceonly explain
> host cat $HOME/mylabs/mview1.sql
select s.prod_id, s.cust_id, sum(s.amount_sold) sum_amount
from   sales s, customers cs
where  s.cust_id = cs.cust_id
and    cs.country_id >= 52772
group by s.prod_id, s.cust_id ;

> @$HOME/mylabs/mview1.sql
Execution Plan
----------------------------------------------------------
Plan hash value: 3821284699
------------------------------------------------------------------------------------------------------------
| Id | Operation              | Name      | Rows  | Bytes |TempSpc| Cost (%CPU)| Time     | Pstart| Pstop |
------------------------------------------------------------------------------------------------------------
|  0 | SELECT STATEMENT       |           |  359K | 8423K |       |  5977  (4) | 00:01:12 |       |       |
|  1 |  HASH GROUP BY         |           |  359K | 8423K |   44M |  5977  (4) | 00:01:12 |       |       |
|* 2 |   HASH JOIN            |           |  918K |   21M | 1000K |  1965  (4) | 00:00:24 |       |       |
|* 3 |    TABLE ACCESS FULL   | CUSTOMERS | 46411 |  453K |       |   332  (2) | 00:00:04 |       |       |
|  4 |    PARTITION RANGE ALL |           |  918K |   12M |       |   426  (9) | 00:00:06 |     1 |    28 |
|  5 |     TABLE ACCESS FULL  | SALES     |  918K |   12M |       |   426  (9) | 00:00:06 |     1 |    28 |
------------------------------------------------------------------------------------------------------------
> host cat $HOME/mylabs/mview2.sql
select s.prod_id, sum(s.amount_sold) sum_amount
from   sales s, customers cs
where  s.cust_id = cs.cust_id
and    cs.country_id >= 52772
group by s.prod_id ;

> @$HOME/mylabs/mview2.sql
Execution Plan
----------------------------------------------------------
Plan hash value: 3821284699
------------------------------------------------------------------------------------------------------------
| Id | Operation              | Name      | Rows  | Bytes |TempSpc| Cost (%CPU)| Time     | Pstart| Pstop |
------------------------------------------------------------------------------------------------------------
|  0 | SELECT STATEMENT       |           |    72 |  1728 |       |  2083 (10) | 00:00:25 |       |       |
|  1 |  HASH GROUP BY         |           |    72 |  1728 |       |  2083 (10) | 00:00:25 |       |       |
|* 2 |   HASH JOIN            |           |  918K |   21M | 1000K |  1965  (4) | 00:00:24 |       |       |
|* 3 |    TABLE ACCESS FULL   | CUSTOMERS | 46411 |  453K |       |   332  (2) | 00:00:04 |       |       |
|  4 |    PARTITION RANGE ALL |           |  918K |   12M |       |   426  (9) | 00:00:06 |     1 |    28 |
|  5 |     TABLE ACCESS FULL  | SALES     |  918K |   12M |       |   426  (9) | 00:00:06 |     1 |    28 |
------------------------------------------------------------------------------------------------------------
> set autotrace off

# Use the SQL Access Advisor (DBMS_ADVISOR.TUNE_MVIEW) to generate the statements needed to create a
# fast-refreshable materialized view for the first query.
> host cat $HOME/mylabs/create_mview.sql
declare
  name varchar2(20) := 'tune_cust_mv' ;
begin
  dbms_advisor.tune_mview(name,
    'create materialized view cust_mv
     enable query rewrite as
     select s.prod_id, s.cust_id, sum(s.amount_sold) sum_amount
     from   sales s, customers cs
     where  s.cust_id = cs.cust_id
     and    cs.country_id >= 52772
     group by s.prod_id, s.cust_id') ;
end;
/

> @$HOME/mylabs/create_mview.sql
# Check the results.
> set long 20000
> select action_id, statement
  from  user_tune_mview
  where task_name = 'tune_cust_mv'
  order by action_id ;

 ACTION_ID STATEMENT
---------- --------------------------------------------------------------------------------
         3 CREATE MATERIALIZED VIEW LOG ON "SH"."SALES" WITH ROWID, SEQUENCE
           ("PROD_ID","CUST_ID","AMOUNT_SOLD") INCLUDING NEW VALUES
         4 ALTER MATERIALIZED VIEW LOG FORCE ON "SH"."SALES" ADD ROWID, SEQUENCE
           ("PROD_ID","CUST_ID","AMOUNT_SOLD") INCLUDING NEW VALUES
         5 CREATE MATERIALIZED VIEW LOG ON "SH"."CUSTOMERS" WITH ROWID, SEQUENCE
           ("CUST_ID","COUNTRY_ID") INCLUDING NEW VALUES
         6 ALTER MATERIALIZED VIEW LOG FORCE ON "SH"."CUSTOMERS" ADD ROWID, SEQUENCE
           ("CUST_ID","COUNTRY_ID") INCLUDING NEW VALUES
         7 CREATE MATERIALIZED VIEW SH.CUST_MV REFRESH FAST WITH ROWID ENABLE QUERY REWRITE AS
           SELECT SH.CUSTOMERS.COUNTRY_ID C1, SH.SALES.CUST_ID C2, SH.SALES.PROD_ID C3,
                  SUM("SH"."SALES"."AMOUNT_SOLD") M1, COUNT("SH"."SALES"."AMOUNT_SOLD") M2, COUNT(*) M3
           FROM   SH.CUSTOMERS, SH.SALES
           WHERE  SH.SALES.CUST_ID = SH.CUSTOMERS.CUST_ID AND (SH.CUSTOMERS.COUNTRY_ID >= 52772)
           GROUP BY SH.CUSTOMERS.COUNTRY_ID, SH.SALES.CUST_ID, SH.SALES.PROD_ID
         8 DROP MATERIALIZED VIEW SH.CUST_MV

# Edit the generated statements as needed and run them.
> CREATE MATERIALIZED VIEW LOG ON "SH"."SALES" WITH ROWID, SEQUENCE
  ("PROD_ID","CUST_ID","AMOUNT_SOLD") INCLUDING NEW VALUES ;
> ALTER MATERIALIZED VIEW LOG FORCE ON "SH"."SALES" ADD ROWID, SEQUENCE
  ("PROD_ID","CUST_ID","AMOUNT_SOLD") INCLUDING NEW VALUES ;
> CREATE MATERIALIZED VIEW LOG ON "SH"."CUSTOMERS" WITH ROWID, SEQUENCE
  ("CUST_ID","COUNTRY_ID") INCLUDING NEW VALUES ;
> ALTER MATERIALIZED VIEW LOG FORCE ON "SH"."CUSTOMERS" ADD ROWID, SEQUENCE
  ("CUST_ID","COUNTRY_ID") INCLUDING NEW VALUES ;
> CREATE MATERIALIZED VIEW SH.CUST_MV REFRESH FAST WITH ROWID ENABLE QUERY REWRITE AS
  SELECT SH.CUSTOMERS.COUNTRY_ID C1, SH.SALES.CUST_ID C2, SH.SALES.PROD_ID C3,
         SUM("SH"."SALES"."AMOUNT_SOLD") M1, COUNT("SH"."SALES"."AMOUNT_SOLD") M2, COUNT(*) M3
  FROM   SH.CUSTOMERS, SH.SALES
  WHERE  SH.SALES.CUST_ID = SH.CUSTOMERS.CUST_ID
  GROUP BY SH.CUSTOMERS.COUNTRY_ID, SH.SALES.CUST_ID, SH.SALES.PROD_ID ;

# Rerun the earlier SQL and check whether the materialized view is used.
> set autotrace traceonly explain
> @$HOME/mylabs/mview1.sql
Execution Plan
----------------------------------------------------------
Plan hash value: 2867986245
-------------------------------------------------------------------------------------------
| Id | Operation                     | Name    | Rows  | Bytes | Cost (%CPU)| Time     |
-------------------------------------------------------------------------------------------
|  0 | SELECT STATEMENT              |         |  285K |   14M |   292 (16) | 00:00:04 |
|  1 |  HASH GROUP BY                |         |  285K |   14M |   292 (16) | 00:00:04 |
|* 2 |   MAT_VIEW REWRITE ACCESS FULL| CUST_MV |  285K |   14M |   258  (5) | 00:00:04 |
-------------------------------------------------------------------------------------------

> @$HOME/mylabs/mview2.sql
Execution Plan
----------------------------------------------------------
Plan hash value: 2867986245
-------------------------------------------------------------------------------------------
| Id | Operation                     | Name    | Rows  | Bytes | Cost (%CPU)| Time     |
-------------------------------------------------------------------------------------------
|  0 | SELECT STATEMENT              |         |  285K |   10M |   292 (16) | 00:00:04 |
|  1 |  HASH GROUP BY                |         |  285K |   10M |   292 (16) | 00:00:04 |
|* 2 |   MAT_VIEW REWRITE ACCESS FULL| CUST_MV |  285K |   10M |   258  (5) | 00:00:04 |
-------------------------------------------------------------------------------------------
> exit
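# If the MAT_VIEW REWRITE step does not appear in your plan, one thing to check (not part of the original
# handout) is that query rewrite is enabled for the instance/session:
> show parameter query_rewrite_enabled
> show parameter query_rewrite_integrity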
Chapter 10. Managing Schema Objects

/* The importance of index type (global vs. local) on partitioned tables */

# Create the lab table (BIG_TABLE).
sqlplus system/oracle
> host cat $HOME/mylabs/create_big_tab.sql
create table big_table nologging
as
select rownum id, a.*
from   all_objects a
where  1=0 ;

> @$HOME/mylabs/create_big_tab.sql

# Load dummy data (1,000,000 rows).
> host cat $HOME/mylabs/load_data_big_tab.sql
declare
  l_cnt  number ;
  l_rows number := 1000000 ;
begin
  insert /*+ append */ into big_table
  select rownum, a.* from all_objects a;
  l_cnt := sql%rowcount ;
  commit ;
  while (l_cnt < l_rows) loop
    insert /*+ append */ into big_table
    select rownum + l_cnt, owner, object_name, subobject_name, object_id, data_object_id,
           object_type, created, last_ddl_time, timestamp, status, temporary, generated, secondary
    from   big_table
    where  rownum <= l_rows - l_cnt ;
    l_cnt := l_cnt + sql%rowcount ;
    commit ;
  end loop ;
end;
/

> @$HOME/mylabs/load_data_big_tab.sql
> select count(*) from big_table;
/* Create a hash-partitioned table */
> select object_id, count(*) from big_table group by object_id ;
 OBJECT_ID   COUNT(*)
---------- ----------
...
     52376         20
     52392         20
     52390         20
     52442         20
     52443         20
     52454         20
     52457         20
49742 rows selected.

# Create a hash-partitioned table on object_id with 10 partitions.
> create table big_table_hashed nologging
  partition by hash(object_id) partitions 10
  as select * from big_table ;

# Insert one row with object_id = 20 and owner = 'SPECIAL_ROW' into each table.
  Each table now contains exactly one row whose owner is SPECIAL_ROW,
  i.e. a value that exists in only one segment.
> host cat $HOME/mylabs/insert_row.sql
insert into big_table(owner, object_name, object_id, last_ddl_time, created)
select 'SPECIAL_ROW', object_name, 20, last_ddl_time, created
from   big_table where rownum = 1 ;

insert into big_table_hashed(owner, object_name, object_id, last_ddl_time, created)
select 'SPECIAL_ROW', object_name, 20, last_ddl_time, created
from   big_table_hashed where rownum = 1 ;

commit ;

> @$HOME/mylabs/insert_row.sql

# Create an index on the owner column of big_table.
> create index big_idx1 on big_table(owner) ;

# Create a LOCAL index on the owner column of big_table_hashed.
> create index big_hash_idx1 on big_table_hashed(owner) local ;
# Gather optimizer statistics.
> exec dbms_stats.gather_table_stats('system','big_table',cascade=>true)
> exec dbms_stats.gather_table_stats('system','big_table_hashed', -
       cascade=>true)

# Compare the execution plans.
> explain plan for select * from big_table where owner = :x ;
> @$ORACLE_HOME/rdbms/admin/utlxpls.sql
PLAN_TABLE_OUTPUT
-------------------------------------------------------------------------------------------
Plan hash value: 3798222444
-------------------------------------------------------------------------------------------
| Id | Operation                   | Name      | Rows  | Bytes | Cost (%CPU)| Time     |
-------------------------------------------------------------------------------------------
|  0 | SELECT STATEMENT            |           | 49783 | 4764K |  1508  (1) | 00:00:19 |
|  1 |  TABLE ACCESS BY INDEX ROWID| BIG_TABLE | 49783 | 4764K |  1508  (1) | 00:00:19 |
|* 2 |   INDEX RANGE SCAN          | BIG_IDX1  | 49783 |       |   120  (2) | 00:00:02 |
-------------------------------------------------------------------------------------------

> explain plan for select * from big_table_hashed where owner = :x ;
> @$ORACLE_HOME/rdbms/admin/utlxpls.sql
PLAN_TABLE_OUTPUT
-----------------------------------------------------------------------------------------------------------------------
Plan hash value: 1586940396
-----------------------------------------------------------------------------------------------------------------------
| Id | Operation                          | Name             | Rows  | Bytes | Cost (%CPU)| Time     | Pstart| Pstop |
-----------------------------------------------------------------------------------------------------------------------
|  0 | SELECT STATEMENT                   |                  | 47657 | 4560K |  1795  (1) | 00:00:22 |       |       |
|  1 |  PARTITION HASH ALL                |                  | 47657 | 4560K |  1795  (1) | 00:00:22 |     1 |    10 |
|  2 |   TABLE ACCESS BY LOCAL INDEX ROWID| BIG_TABLE_HASHED | 47657 | 4560K |  1795  (1) |          |       |       |
|* 3 |    INDEX RANGE SCAN                | BIG_HASH_IDX1    | 47657 |       |   122  (1) | 00:00:02 |     1 |    10 |
-----------------------------------------------------------------------------------------------------------------------

# How do the two plans differ? Notice that, to find a single row, the local (partitioned) index has to
# scan every partition. Why is that?

# Query the SPECIAL_ROW row 1,000 times against each table and analyze the SQL trace.
> conn system/oracle
> alter session set events '10046 trace name context forever, level 12' ;
> host cat $HOME/mylabs/perf_test.sql
declare
  tab_rec big_table%rowtype ;
begin
  for i in 1..1000 loop
    select * into tab_rec from big_table where owner = 'SPECIAL_ROW' ;
  end loop ;
end ;
/

declare
  tab_rec big_table_hashed%rowtype ;
begin
  for i in 1..1000 loop
    select * into tab_rec from big_table_hashed where owner = 'SPECIAL_ROW' ;
  end loop ;
end ;
/

> @$HOME/mylabs/perf_test.sql
> alter session set events '10046 trace name context off' ;

# Locate and analyze the SQL trace file.
> host cat $HOME/mylabs/trace_tkprof.sql
SELECT 'host tkprof ' || d.value || '/' || lower(c.instance_name) || '_ora_' || b.spid || '.trc' ||
       ' $HOME/output.txt sys=no' "User Trace File"
FROM   v$session a, v$process b, v$instance c, v$parameter d,
       ( select * from v$mystat where rownum = 1 ) e
WHERE  a.paddr = b.addr
AND    a.sid = e.sid
AND    d.name = 'user_dump_dest' ;

> @$HOME/mylabs/trace_tkprof.sql
User Trace File
-----------------------------------------------------------------------------------------------------
host tkprof /u01/app/oracle/admin/orcl/udump/orcl_ora_23008.trc $HOME/output.txt sys=no
# Copy the output above and run it as is.
> host tkprof /u01/app/oracle/admin/orcl/udump/orcl_ora_23008.trc $HOME/output.txt sys=no
> host vi $HOME/output.txt

SELECT * FROM BIG_TABLE WHERE OWNER = 'SPECIAL_ROW'

call     count       cpu    elapsed       disk      query    current        rows
------- ------  -------- ---------- ---------- ---------- ----------  ----------
Parse        1      0.00       0.00          0          0          0           0
Execute   1000      0.12       0.12          0          0          0           0
Fetch     1000      0.11       0.16          3       4000          0        1000
------- ------  -------- ---------- ---------- ---------- ----------  ----------
total     2001      0.24       0.29          3       4000          0        1000

Misses in library cache during parse: 1
Optimizer mode: ALL_ROWS
Parsing user id: 5     (recursive depth: 1)

Rows     Row Source Operation
-------  ---------------------------------------------------
   1000  TABLE ACCESS BY INDEX ROWID BIG_TABLE (cr=4000 pr=3 pw=0 time=176610 us)
   1000   INDEX RANGE SCAN BIG_IDX1 (cr=3000 pr=2 pw=0 time=82324 us)(object id 52632)

Elapsed times include waiting on following events:
  Event waited on                             Times   Max. Wait  Total Waited
  ----------------------------------------   Waited  ----------  ------------
  db file sequential read                         3        0.02          0.05
-------------------------------------------------------------------------------------------------

SELECT * FROM BIG_TABLE_HASHED WHERE OWNER = 'SPECIAL_ROW'

call     count       cpu    elapsed       disk      query    current        rows
------- ------  -------- ---------- ---------- ---------- ----------  ----------
Parse        1      0.00       0.01          0          0          0           0
Execute   1000      0.14       0.13          0          0          0           0
Fetch     1000      0.79       0.85          1      21000          0        1000
------- ------  -------- ---------- ---------- ---------- ----------  ----------
total     2001      0.94       1.00          1      21000          0        1000

Misses in library cache during parse: 1
Optimizer mode: ALL_ROWS
Parsing user id: 5     (recursive depth: 1)

Rows     Row Source Operation
-------  ---------------------------------------------------
   1000  PARTITION HASH ALL PARTITION: 1 10 (cr=21000 pr=1 pw=0 time=889578 us)
   1000   TABLE ACCESS BY LOCAL INDEX ROWID BIG_TABLE_HASHED PARTITION: 1 10 (cr=21000 pr=1 pw=0 time=775003 us)
   1000    INDEX RANGE SCAN BIG_HASH_IDX1 PARTITION: 1 10 (cr=20000 pr=0 pw=0 time=288935 us)(object id 52633)

Elapsed times include waiting on following events:
  Event waited on                             Times   Max. Wait  Total Waited
  ----------------------------------------   Waited  ----------  ------------
  db file sequential read                         1        0.01          0.01

# A local index is partitioned with the same partition key and partitioning type as its table, so each
# index partition is effectively an independent B-tree segment. Unless a predicate on the partition key
# is supplied as well, every index partition has to be probed to find the rows matching the owner
# condition. This can hurt performance badly when fetching a very small amount of data from a
# partitioned table (see the pruning example below).
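# To illustrate the point (this query is not in the original handout, but it uses only the tables and
# values created above): adding a predicate on the partition key object_id lets the optimizer prune to a
# single hash partition even with the local index. The SPECIAL_ROW row was inserted with object_id = 20.
> explain plan for
  select * from big_table_hashed where owner = :x and object_id = 20 ;
> @$ORACLE_HOME/rdbms/admin/utlxpls.sql
  -- the Pstart/Pstop columns should now show a single partition (KEY) instead of 1 - 10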
# Create a GLOBAL index and check again.
> conn system/oracle
> drop index big_hash_idx1 ;
> create index big_hash_idx1 on big_table_hashed(owner)
  global partition by range (owner)
  ( partition p1 values less than ('F'),
    partition p2 values less than ('M'),
    partition p3 values less than ('T'),
    partition p4 values less than (MAXVALUE) );
  - The new global index is partitioned on the owner column itself, which means rows whose owner is
    SPECIAL_ROW can fall into only one partition, p3.

> exec dbms_stats.gather_table_stats('system','big_table_hashed', -
       cascade=>true)

# Analyze the execution plan and compare it with the earlier result.
> explain plan for select * from big_table_hashed where owner = :x ;
> @$ORACLE_HOME/rdbms/admin/utlxpls.sql
PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------
Plan hash value: 2516500005
------------------------------------------------------------------------------------------------------------------------
| Id | Operation                           | Name             | Rows  | Bytes | Cost (%CPU)| Time     | Pstart| Pstop |
------------------------------------------------------------------------------------------------------------------------
|  0 | SELECT STATEMENT                    |                  | 49883 | 4773K |  1904  (1) | 00:00:23 |       |       |
|  1 |  PARTITION RANGE SINGLE             |                  | 49883 | 4773K |  1904  (1) | 00:00:23 |   KEY |   KEY |
|  2 |   TABLE ACCESS BY GLOBAL INDEX ROWID| BIG_TABLE_HASHED | 49883 | 4773K |  1904  (1) | 00:00:23 | ROWID | ROWID |
|* 3 |    INDEX RANGE SCAN                 | BIG_HASH_IDX1    | 49883 |       |   148  (2) | 00:00:02 |   KEY |   KEY |
------------------------------------------------------------------------------------------------------------------------

# Does it scan every partition now? No: only the specific partition determined by the value of the
# partition key column (owner) is accessed.

# Check the actual execution once more with SQL trace.
> conn system/oracle
> alter session set events '10046 trace name context forever, level 12' ;
> @$HOME/mylabs/perf_test.sql
> alter session set events '10046 trace name context off' ;
# Locate and analyze the SQL trace file.
> @$HOME/mylabs/trace_tkprof.sql
User Trace File
-----------------------------------------------------------------------------------------------------
host tkprof /u01/app/oracle/admin/orcl/udump/orcl_ora_23699.trc $HOME/output.txt sys=no

# Copy the output above and run it as is.
> host tkprof /u01/app/oracle/admin/orcl/udump/orcl_ora_23699.trc $HOME/output.txt sys=no
> host vi $HOME/output.txt

SELECT * FROM BIG_TABLE_HASHED WHERE OWNER = 'SPECIAL_ROW'

call     count       cpu    elapsed       disk      query    current        rows
------- ------  -------- ---------- ---------- ---------- ----------  ----------
Parse        1      0.01       0.01          0          0          0           0
Execute   1000      0.11       0.10          0          0          0           0
Fetch     1000      0.10       0.15          3       4000          0        1000
------- ------  -------- ---------- ---------- ---------- ----------  ----------
total     2001      0.23       0.27          3       4000          0        1000

Misses in library cache during parse: 1
Optimizer mode: ALL_ROWS
Parsing user id: 5     (recursive depth: 1)

Rows     Row Source Operation
-------  ---------------------------------------------------
   1000  PARTITION RANGE SINGLE PARTITION: 3 3 (cr=4000 pr=3 pw=0 time=166058 us)
   1000   TABLE ACCESS BY GLOBAL INDEX ROWID BIG_TABLE_HASHED PARTITION: ROW LOCATION ROW LOCATION (cr=4000 pr=3 pw=0 time=134676 us)
   1000    INDEX RANGE SCAN BIG_HASH_IDX1 PARTITION: 3 3 (cr=3000 pr=2 pw=0 time=80809 us) (object id 52649)

Elapsed times include waiting on following events:
  Event waited on                             Times   Max. Wait  Total Waited
  ----------------------------------------   Waited  ----------  ------------
  db file sequential read                         3        0.03          0.06

# How does this compare with the local-index result? Clean up when done.
> drop table big_table purge ;
> drop table big_table_hashed purge ;
Chapter 11. Managing Storage

# Examining block space usage
sqlplus / as sysdba
> host cat $HOME/mylabs/show_space.sql
> @$HOME/mylabs/show_space.sql
> select tablespace_name, segment_space_management from dba_tablespaces ;
TABLESPACE_NAME                SEGMEN
------------------------------ ------
SYSTEM                         MANUAL
UNDOTBS1                       MANUAL
SYSAUX                         AUTO
TEMP                           MANUAL
USERS                          AUTO
EXAMPLE                        AUTO

> create table test tablespace users as select * from scott.emp ;
> select segment_name, file_id, extent_id, block_id, blocks
  from  dba_extents
  where segment_name = 'TEST' and owner = USER ;
SEGMENT_NAME     FILE_ID  EXTENT_ID   BLOCK_ID     BLOCKS
------------- ---------- ---------- ---------- ----------
TEST                   4          0       1681          8

> save 1

> exec show_space('test')
Unformatted Blocks...          0
FS1 Blocks (0-25)...           0
FS2 Blocks (25-50)...          0
FS3 Blocks (50-75)...          0
FS4 Blocks (75-100)...         0
Full Blocks...                 1
Total Blocks...                8
Total Bytes...            65,536
Total MBytes...                0
Unused Blocks...               4
Unused Bytes...           32,768
Last Used Ext FileId...        4
Last Used Ext BlockId...   1,681
Last Used Block...             4
> begin
    for i in 1..10 loop
      insert into test select * from test ;
    end loop ;
    commit ;
  end;
  /

> delete test where deptno = 10 ;
> commit ;
> alter table test allocate extent ;

> @1
SEGMENT_NAME     FILE_ID  EXTENT_ID   BLOCK_ID     BLOCKS
------------- ---------- ---------- ---------- ----------
TEST                   4          0       1681          8
TEST                   4          1       1689          8
TEST                   4          2       1697          8
TEST                   4          3       1705          8
TEST                   4          4       1713          8
TEST                   4          5       1721          8
TEST                   4          6       1729          8
TEST                   4          7       1737          8
TEST                   4          8       1745          8
TEST                   4          9       1753          8
TEST                   4         10       1761          8
TEST                   4         11       1769          8
TEST                   4         12       1777          8

> exec show_space('test')
Unformatted Blocks...          0
FS1 Blocks (0-25)...           0
FS2 Blocks (25-50)...         84
FS3 Blocks (50-75)...          0
FS4 Blocks (75-100)...         3
Full Blocks...                 1
Total Blocks...              104
Total Bytes...           851,968
Total MBytes...                0
Unused Blocks...               8
Unused Bytes...           65,536
Last Used Ext FileId...        4
Last Used Ext BlockId...   1,769
Last Used Block...             8

> drop table test purge ;
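# The body of show_space.sql is not reproduced in this handout. A minimal sketch of an equivalent
# procedure, built on the documented DBMS_SPACE.SPACE_USAGE call, might look like the block below. It
# works only for segments in ASSM tablespaces, and the procedure name, parameters, and output format
# are illustrative, not the original lab script.
create or replace procedure show_space( p_segname in varchar2,
                                        p_owner   in varchar2 default user,
                                        p_type    in varchar2 default 'TABLE' )
as
  l_unformatted_blocks number;  l_unformatted_bytes number;
  l_fs1_blocks number;          l_fs1_bytes number;
  l_fs2_blocks number;          l_fs2_bytes number;
  l_fs3_blocks number;          l_fs3_bytes number;
  l_fs4_blocks number;          l_fs4_bytes number;
  l_full_blocks number;         l_full_bytes number;
begin
  -- block-level usage below the segment's high-water mark (ASSM segments only)
  dbms_space.space_usage( upper(p_owner), upper(p_segname), p_type,
                          l_unformatted_blocks, l_unformatted_bytes,
                          l_fs1_blocks, l_fs1_bytes,
                          l_fs2_blocks, l_fs2_bytes,
                          l_fs3_blocks, l_fs3_bytes,
                          l_fs4_blocks, l_fs4_bytes,
                          l_full_blocks, l_full_bytes );
  dbms_output.put_line( 'Unformatted Blocks   ' || l_unformatted_blocks );
  dbms_output.put_line( 'FS1 Blocks (0-25)    ' || l_fs1_blocks );
  dbms_output.put_line( 'FS2 Blocks (25-50)   ' || l_fs2_blocks );
  dbms_output.put_line( 'FS3 Blocks (50-75)   ' || l_fs3_blocks );
  dbms_output.put_line( 'FS4 Blocks (75-100)  ' || l_fs4_blocks );
  dbms_output.put_line( 'Full Blocks          ' || l_full_blocks );
end;
/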
# Resumable Space Allocation
sqlplus / as sysdba
> host cat $HOME/mylabs/print_table.sql
> @$HOME/mylabs/print_table.sql
> create tablespace userdata
  datafile '/u01/app/oracle/oradata/orcl/userdata.dbf' size 1m autoextend off ;
> create user test identified by test default tablespace userdata ;
> grant connect, resource to test ;

# Grant the RESUMABLE privilege.
> grant resumable to test ;

# Connect as the test user.
> conn test/test
> create table t1 (id char(1000)) ;

# When space runs out during a large data load, an error is raised.
> begin
    for i in 1..1000 loop
      insert into t1 values ( i ) ;
    end loop ;
  end;
  /
ERROR at line 1:
ORA-01653: unable to extend table TEST.T1 by 8 in tablespace USERDATA
ORA-06512: at line 3

> save load replace

# Enable resumable space allocation.
> alter session enable resumable ;

# Now the same load does not fail when space runs out; the statement is suspended instead.
> @load
# Open a new terminal and continue the lab there.
tail -60 $ORACLE_BASE/admin/orcl/bdump/alert_orcl.log
Sat Apr 11 04:32:50 2009
statement in resumable session 'User TEST(62), Session 152, Instance 1' was suspended due to
    ORA-01653: unable to extend table TEST.T1 by 8 in tablespace USERDATA

# Check via the dictionary view.
sqlplus / as sysdba
> exec print_table('select user_id, session_id, status, start_time, -
        suspend_time, sql_text, error_number, error_msg -
        from dba_resumable' ) ;
USER_ID      : 62
SESSION_ID   : 152
STATUS       : SUSPENDED
START_TIME   : 04/11/09 04:32:47
SUSPEND_TIME : 04/11/09 04:32:50
SQL_TEXT     : INSERT INTO T1 VALUES (:B1)
ERROR_NUMBER : 1653
ERROR_MSG    : ORA-01653: unable to extend table TEST.T1 by 8 in tablespace USERDATA

# Resolve the space shortage in the tablespace.
> alter database datafile '/u01/app/oracle/oradata/orcl/userdata.dbf' autoextend on ;

# In the test user's terminal, the suspended statement has now resumed.

# Clean up the lab.
> drop user test cascade ;
> drop tablespace userdata including contents and datafiles ;
> exit
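# For reference (not in the original handout): a suspended statement only waits for the resumable
# timeout (7200 seconds by default for ALTER SESSION ENABLE RESUMABLE) before failing with the original
# error. A named, explicit timeout can be set when enabling resumable mode; the name and the
# 3600-second value below are illustrative only.
> alter session enable resumable timeout 3600 name 'load t1' ;
> select name, status, timeout from dba_resumable ;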
# Transportable Tablespaces
sqlplus / as sysdba

# Check the platform's endian format.
> select platform_name from v$database ;
> select * from v$transportable_platform ;

# Prepare for the lab.
> create tablespace oltp
  datafile '/u01/app/oracle/oradata/orcl/oltp.dbf' size 10m ;
> create user oltp identified by oltp default tablespace oltp ;
> grant dba to oltp ;
> conn oltp/oltp
> create table dept as select * from scott.dept ;
> alter table dept add primary key(deptno) using index tablespace users ;
> create table emp as select * from scott.emp ;
> alter table emp add foreign key(deptno) references dept(deptno) ;

# Run the transport set check before transporting.
> conn / as sysdba
> exec dbms_tts.transport_set_check('oltp',true)
> select * from transport_set_violations ;
VIOLATIONS
------------------------------------------------------------------------------------------------------
Index OLTP.SYS_C005479 in tablespace USERS enforces primary constriants of table OLTP.DEPT in tablespace OLTP

# The index enforcing the primary key lives in another tablespace, so move it into the oltp tablespace.
> alter index oltp.sys_c005479 rebuild tablespace oltp ;
> alter tablespace oltp read only ;
> exit
# Use Data Pump to extract the required metadata.
expdp system/oracle transport_tablespaces=OLTP dumpfile=tts.dmp
Dump file set for SYSTEM.SYS_EXPORT_TRANSPORTABLE_01 is:
  /u01/app/oracle/admin/orcl/dpdump/tts.dmp
Job "SYSTEM"."SYS_EXPORT_TRANSPORTABLE_01" successfully completed at 05:08:31

# Copy the required files to the target locations.
cp /u01/app/oracle/oradata/orcl/oltp.dbf /u01/app/oracle/oradata/catdb/oltp.dbf
cp /u01/app/oracle/admin/orcl/dpdump/tts.dmp $ORACLE_HOME/rdbms/log/

# Create the required user in the target DB.
sqlplus sys/oracle@catdb as sysdba
> create user oltp identified by oltp ;
> grant connect, resource to oltp;
> exit

# Use Data Pump to plug the metadata into the target DB.
export ORACLE_SID=catdb
impdp system/oracle dumpfile=tts.dmp \
      transport_datafiles=/u01/app/oracle/oradata/catdb/oltp.dbf

# Check the results, then clean up.
sqlplus sys/oracle@catdb as sysdba
> alter tablespace oltp read write ;
> select * from oltp.emp ;
> conn sys/oracle@orcl as sysdba
> alter tablespace oltp read write ;
> drop tablespace oltp including contents and datafiles ;
> drop user oltp cascade ;
> exit
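# In this lab both databases run on the same platform, so no endian conversion is needed. For a
# cross-endian transport the datafile would have to be converted first, for example with RMAN on the
# source database while the tablespace is still read only. This is a hedged sketch only: the target
# platform name must be taken from V$TRANSPORTABLE_PLATFORM and the FORMAT path is illustrative.
rman target /
> convert tablespace oltp
    to platform 'Solaris[tm] OE (64-bit)'
    format '/tmp/%U' ;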
Chapter 15. Database Security

# Configuring a Virtual Private Database

# Grant the privileges needed to create the context.
sqlplus / as sysdba

# Create the package that stores the application attributes used by the context.
> host cat $HOME/mylabs/create_pkg.sql
CREATE OR REPLACE PACKAGE system.app_pkg
IS
  PROCEDURE show_app_context ;
  PROCEDURE set_app_context ;
  FUNCTION the_predicate (schema_in VARCHAR2, name_in VARCHAR2) RETURN VARCHAR2 ;
END app_pkg ;
/

CREATE OR REPLACE PACKAGE BODY system.app_pkg
IS
  c_context        VARCHAR2(30) := 'HR_INFO' ;
  c_dept_attrib    VARCHAR2(30) := 'DEPT_ATTRIB' ;
  c_title_attrib   VARCHAR2(30) := 'TITLE_ATTRIB' ;
  c_country_attrib VARCHAR2(30) := 'COUNTRY_ATTRIB' ;
  c_dept_val       VARCHAR2(30) := 'Finance' ;
  c_title_val      VARCHAR2(30) := 'FI_MGR' ;
  c_country_val    VARCHAR2(4)  := 'US' ;

  PROCEDURE show_app_context
  IS
  BEGIN
    DBMS_OUTPUT.PUT_LINE('Type: ' || c_dept_attrib    || ' - ' || SYS_CONTEXT(c_context, c_dept_attrib));
    DBMS_OUTPUT.PUT_LINE('Type: ' || c_title_attrib   || ' - ' || SYS_CONTEXT(c_context, c_title_attrib));
    DBMS_OUTPUT.PUT_LINE('Type: ' || c_country_attrib || ' - ' || SYS_CONTEXT(c_context, c_country_attrib));
  END show_app_context ;

  PROCEDURE set_app_context
  IS
  BEGIN
    DBMS_SESSION.SET_CONTEXT(c_context, c_dept_attrib,    c_dept_val) ;
    DBMS_SESSION.SET_CONTEXT(c_context, c_title_attrib,   c_title_val) ;
    DBMS_SESSION.SET_CONTEXT(c_context, c_country_attrib, c_country_val) ;
  END set_app_context ;

  FUNCTION the_predicate (schema_in VARCHAR2, name_in VARCHAR2) RETURN VARCHAR2
  IS
    l_context    VARCHAR2(100) := SYS_CONTEXT(c_context, c_dept_attrib) ;
    v_return_val VARCHAR2(2000);
  BEGIN
    IF user = 'HR' and l_context = 'Finance' THEN
      v_return_val := 'department_id IN ( SELECT department_id FROM hr.departments ' ||
                      'WHERE department_name = SYS_CONTEXT(''' || c_context || ''',''' || c_dept_attrib || '''))';
      RETURN v_return_val ;
    ELSE
      RETURN NULL;
    END IF;
  END the_predicate ;

END app_pkg;
/

> @$HOME/mylabs/create_pkg.sql

# The package is created in the SYSTEM schema and has three subprograms:
#   show_app_context : displays the attributes currently set in the context
#   set_app_context  : sets the context attributes
#   the_predicate    : the function that implements the VPD policy
> grant execute on system.app_pkg to hr;

# Implement the FGAC policy.
> create or replace context hr_info using system.app_pkg ;
> host cat $HOME/mylabs/add_policy.sql
BEGIN
  DBMS_RLS.ADD_POLICY (
    'HR',
    'EMPLOYEES',
    'HR_POLICY',
    'SYSTEM',
    'APP_PKG.THE_PREDICATE',
    'SELECT, UPDATE, DELETE',
    FALSE,
    TRUE);
END;
/

> @$HOME/mylabs/add_policy.sql
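# To confirm the policy is registered and enabled before testing (not part of the original handout,
# just a standard dictionary check):
> select object_name, policy_name, package, function, enable
  from  dba_policies
  where object_owner = 'HR' ;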
# Create a trigger so that the context attributes are set after every logon.
> create or replace trigger set_id_on_logon
  after logon on database
  begin
    system.app_pkg.set_app_context ;
  end ;
  /

# Log in as HR and check the result.
> conn hr/hr
> set serveroutput on
> exec system.app_pkg.show_app_context
Type: DEPT_ATTRIB - Finance
Type: TITLE_ATTRIB - FI_MGR
Type: COUNTRY_ATTRIB - US

> select employee_id, first_name, last_name, salary, department_id from hr.employees ;
EMPLOYEE_ID FIRST_NAME           LAST_NAME                     SALARY DEPARTMENT_ID
----------- -------------------- ------------------------- ---------- -------------
        108 Nancy                Greenberg                      12000           100
        109 Daniel               Faviet                          9000           100
        110 John                 Chen                            8200           100
        111 Ismael               Sciarra                         7700           100
        112 Jose Manuel          Urman                           7800           100
        113 Luis                 Popp                            6900           100

# Log in as SYSTEM and check again. Do you get the same result as above?
> conn system/oracle
> set serveroutput on
> exec system.app_pkg.show_app_context
Type: DEPT_ATTRIB - Finance
Type: TITLE_ATTRIB - FI_MGR
Type: COUNTRY_ATTRIB - US
> select employee_id, first_name, last_name, salary, department_id from hr.employees ;
        191 Randall              Perkins                         2500            50
        192 Sarah                Bell                            4000            50
        193 Britney              Everett                         3900            50
        194 Samuel               McCain                          3200            50
        195 Vance                Jones                           2800            50
        196 Alana                Walsh                           3100            50
        197 Kevin                Feeney                          3000            50
107 rows selected.

# Clean up the lab.
> conn / as sysdba
> begin
    dbms_rls.drop_policy( 'HR', 'EMPLOYEES', 'HR_POLICY') ;
  end;
  /
> drop trigger set_id_on_logon ;
> drop package system.app_pkg ;
> drop context hr_info ;