Friday, December 31, 2010

XML Import

CREATE PROCEDURE [TestingSP].[prc_Testing_INSERT_XML]
@intClientID INT,
@CurrencyXML XML
AS
DECLARE @TempTable TABLE (FirstName NVARCHAR (256), LastName NVARCHAR (256), Email NVARCHAR(256));
BEGIN


-- Shred the incoming XML into the table variable
INSERT INTO @TempTable(FirstName, LastName, Email)
SELECT MliFirstName = T.Item.query('firstname').value('.', 'NVARCHAR(256)'),
MliLastName = T.Item.query('lastname').value('.', 'NVARCHAR(256)'),
MliEmail = T.Item.query('email').value('.', 'NVARCHAR(256)')
FROM @CurrencyXML.nodes('shq-mailinglist/person') AS T(Item)

BEGIN TRY
IF EXISTS (SELECT * FROM @TempTable)
BEGIN

INSERT INTO Testing.ImpMailingList(ClID, ImlFirstName,
ImlLastName, ImlEmail)
SELECT @intClientID, tt.FirstName, tt.LastName, tt.Email
FROM @TempTable tt;
END

RETURN 1;
END TRY
BEGIN CATCH

INSERT INTO Testing.ApplicationError(ClID, AerENo, AerESeverity, AerEState, AerEProcedure, AerELine, AerEMessage)
VALUES(NULL, ERROR_NUMBER(), ERROR_SEVERITY(), ERROR_STATE(), ERROR_PROCEDURE(), ERROR_LINE(), ERROR_MESSAGE());

RETURN -1;
END CATCH

END
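
For reference, a call to this procedure might look like the following; the client ID and the XML payload are made-up sample values shaped like the shq-mailinglist/person structure the procedure shreds.

-- Hypothetical sample call (values are illustrative only)
DECLARE @xml XML;
SET @xml = N'<shq-mailinglist>
<person><firstname>Jane</firstname><lastname>Doe</lastname><email>jane.doe@example.com</email></person>
<person><firstname>John</firstname><lastname>Smith</lastname><email>john.smith@example.com</email></person>
</shq-mailinglist>';

EXEC [TestingSP].[prc_Testing_INSERT_XML]
@intClientID = 1,
@CurrencyXML = @xml;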

Wednesday, December 29, 2010

Passing Multiple Records to a Stored Procedure in SQL Server

CREATE PROCEDURE [ImportJobMaterials]
@JobMaterialsXml AS VARCHAR(MAX),
@JobID AS INT,
@ChangeOrderNumber AS VARCHAR(10) = NULL,
@ChangeOrderDescription AS VARCHAR(100) = NULL
AS
BEGIN
DECLARE @XML AS XML

DECLARE @MaterialsTable TABLE
(
ID INT IDENTITY(1,1),
Area VARCHAR(250),
Phase VARCHAR(250),
WorkCodeID INT,
WorkCodeTitle VARCHAR(250),
MaterialTitle VARCHAR(250),
Quantity DECIMAL(18,2),
TotalHours DECIMAL(18,2)
)

-- Cast the incoming VARCHAR payload to XML so it can be shredded with nodes()
SELECT @XML = @JobMaterialsXml

INSERT INTO @MaterialsTable (Area, Phase, WorkCodeID, WorkCodeTitle, MaterialTitle, Quantity, TotalHours)
SELECT M.Item.query('./Area').value('.','VARCHAR(250)') Area,
M.Item.query('./Phase').value('.','VARCHAR(250)') Phase,
M.Item.query('./WorkCodeID').value('.','INT') WorkCodeID,
M.Item.query('./WorkCodeTitle').value('.','VARCHAR(250)') WorkCodeTitle,
M.Item.query('./MaterialTitle').value('.','VARCHAR(250)') MaterialTitle,
M.Item.query('./Quantity').value('.','DECIMAL(18,2)') Quantity,
M.Item.query('./TotalHours').value('.','DECIMAL(18,2)') TotalHours
FROM @XML.nodes('/ArrayOfJobMaterialImport/JobMaterialImport') AS M(Item)

--Process the data
END
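
For reference, a call might look like this; the XML string follows the ArrayOfJobMaterialImport/JobMaterialImport shape the procedure shreds, and every value (job ID, change order, materials) is illustrative only.

-- Hypothetical sample call (values are illustrative only)
EXEC [ImportJobMaterials]
@JobMaterialsXml = '<ArrayOfJobMaterialImport>
<JobMaterialImport>
<Area>Basement</Area>
<Phase>Rough-in</Phase>
<WorkCodeID>101</WorkCodeID>
<WorkCodeTitle>Conduit</WorkCodeTitle>
<MaterialTitle>EMT 3/4 inch</MaterialTitle>
<Quantity>250.00</Quantity>
<TotalHours>12.50</TotalHours>
</JobMaterialImport>
</ArrayOfJobMaterialImport>',
@JobID = 42,
@ChangeOrderNumber = 'CO-001',
@ChangeOrderDescription = 'Added basement conduit run'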

Monday, December 13, 2010

Page Life Expectancy (SQL Server)

SELECT [object_name],
[counter_name],
[cntr_value]
FROM sys.dm_os_performance_counters
WHERE [object_name] LIKE '%Manager%'
AND [counter_name] = 'Page life expectancy'

Friday, December 10, 2010

Find Unused Indexes of Current Database

DECLARE @dbid INT
SELECT @dbid = DB_ID(DB_NAME())
SELECT OBJECTNAME = OBJECT_NAME(I.OBJECT_ID),
INDEXNAME = I.NAME,
I.INDEX_ID
FROM SYS.INDEXES I
JOIN SYS.OBJECTS O
ON I.OBJECT_ID = O.OBJECT_ID
WHERE OBJECTPROPERTY(O.OBJECT_ID,'IsUserTable') = 1
AND I.INDEX_ID NOT IN (
SELECT S.INDEX_ID
FROM SYS.DM_DB_INDEX_USAGE_STATS S
WHERE S.OBJECT_ID = I.OBJECT_ID
AND I.INDEX_ID = S.INDEX_ID
AND DATABASE_ID = @dbid)
ORDER BY OBJECTNAME,
I.INDEX_ID,
INDEXNAME ASC
GO

SQL SERVER – Find Row Count in Table – Find Largest Table in Database

SELECT sc.name +'.'+ ta.name TableName
,SUM(pa.rows) RowCnt
FROM sys.tables ta
INNER JOIN sys.partitions pa
ON pa.OBJECT_ID = ta.OBJECT_ID
INNER JOIN sys.schemas sc
ON ta.schema_id = sc.schema_id
WHERE ta.is_ms_shipped = 0 AND pa.index_id IN (1,0)
GROUP BY sc.name,ta.name
ORDER BY SUM(pa.rows) DESC

Thursday, December 9, 2010

Daily Activities- SQL Server BackUP & Restore

The following query returns the most recent 100 database backup records.
You can get results for a different backup type by changing the value compared against bs.TYPE below:

D = Full backup
L = Transaction log backup
I = Differential (incremental) backup

SELECT TOP 100 bs.database_name,
bs.backup_start_date,
bs.backup_finish_date,
bm.physical_device_name,
bm.logical_device_name
FROM msdb..backupset bs
JOIN msdb..backupmediafamily bm
ON bs.media_set_id = bm.media_set_id
WHERE bs.TYPE = 'D'
ORDER BY bs.backup_start_date DESC,
bs.database_name

///////////////////////////////////////////////////////////////////////////////////////

Another query to get the backup start and end times along with the total backup size.

SELECT
a.database_name AS dbname,
CONVERT(CHAR(20),a.backup_start_date,100)
AS start_time,
CONVERT(CHAR(20),a.backup_finish_date,100)
AS end_time,
a.backup_size,
a.backup_size/1024/1024 AS [backup_Size_MB]
FROM
msdb.dbo.backupset AS a
LEFT JOIN
msdb.dbo.backupset AS b
ON
a.database_name = b.database_name
AND a.backup_start_date >
(SELECT MAX(backup_start_date) FROM
msdb.dbo.backupset) - 1
AND b.backup_start_date = DATEADD(week, -1,
(SELECT MAX(backup_start_date)
FROM msdb.dbo.backupset) - 1)
ORDER BY
a.database_name,
a.backup_start_date

/////////////////////////////////////////////////////////// RESTORE Query

Another useful query to return the restore history for a given database.

DECLARE @dbname SYSNAME
SET @dbname = 'db_name' /* Replace with your dbname*/
SELECT
destination_database_name AS 'Database Name',
[user_name] AS 'Username',
-- Searched CASE expressions so that NULL values are matched correctly
CASE
WHEN restore_type IS NULL THEN 'NULL'
WHEN restore_type = 'D' THEN 'Database'
WHEN restore_type = 'F' THEN 'File'
WHEN restore_type = 'G' THEN 'Filegroup'
WHEN restore_type = 'I' THEN 'Differential'
WHEN restore_type = 'L' THEN 'Log File'
WHEN restore_type = 'V' THEN 'Verifyonly'
WHEN restore_type = 'R' THEN 'Revert'
END AS 'Restore Type',
CASE
WHEN [replace] IS NULL THEN 'NULL'
WHEN [replace] = 1 THEN 'YES'
WHEN [replace] = 0 THEN 'NO'
END AS 'Database Replaced',
restore_date AS 'Date Restored'
FROM msdb..restorehistory
WHERE destination_database_name = CASE
WHEN @dbname IS NOT NULL THEN @dbname
ELSE destination_database_name END
ORDER BY restore_date DESC

Wednesday, December 8, 2010

Get Database Backup History

SELECT TOP 100
s.database_name,
m.physical_device_name,
CAST(CAST(s.backup_size / 1000000 AS INT) AS VARCHAR(14)) + ' ' + 'MB' AS bkSize,
CAST(DATEDIFF(second, s.backup_start_date,
s.backup_finish_date) AS VARCHAR(4)) + ' ' + 'Seconds' TimeTaken,
s.backup_start_date,
CAST(s.first_lsn AS VARCHAR(50)) AS first_lsn,
CAST(s.last_lsn AS VARCHAR(50)) AS last_lsn,
CASE s.[type]
WHEN 'D' THEN 'Full'
WHEN 'I' THEN 'Differential'
WHEN 'L' THEN 'Transaction Log'
END AS BackupType,
s.server_name,
s.recovery_model
FROM msdb.dbo.backupset s
INNER JOIN msdb.dbo.backupmediafamily m ON s.media_set_id = m.media_set_id
--WHERE s.database_name = DB_NAME() -- Uncomment this line to limit results to the current database
ORDER BY backup_start_date DESC, backup_finish_date
GO

Identify blocking

SELECT
db.name DBName,
tl.request_session_id,
wt.blocking_session_id,
OBJECT_NAME(p.OBJECT_ID) BlockedObjectName,
tl.resource_type,
h1.TEXT AS RequestingText,
h2.TEXT AS BlockingText,
tl.request_mode
FROM sys.dm_tran_locks AS tl
INNER JOIN sys.databases db ON db.database_id = tl.resource_database_id
INNER JOIN sys.dm_os_waiting_tasks AS wt ON tl.lock_owner_address = wt.resource_address
INNER JOIN sys.partitions AS p ON p.hobt_id = tl.resource_associated_entity_id
INNER JOIN sys.dm_exec_connections ec1 ON ec1.session_id = tl.request_session_id
INNER JOIN sys.dm_exec_connections ec2 ON ec2.session_id = wt.blocking_session_id
CROSS APPLY sys.dm_exec_sql_text(ec1.most_recent_sql_handle) AS h1
CROSS APPLY sys.dm_exec_sql_text(ec2.most_recent_sql_handle) AS h2
GO

Friday, November 26, 2010

List all Default Values in a SQL Server Database

SELECT obj.name AS 'Table', col.name AS 'Column',
object_definition(col.default_object_id) AS [DefaultValue]
FROM sys.objects obj INNER JOIN sys.columns col
ON obj.object_id = col.object_id
WHERE obj.type = 'U'
AND col.default_object_id <> 0 -- only columns that actually have a default value

Tuesday, November 16, 2010

Script for Transaction Log - Back UP

DECLARE @DBName varchar(255)
DECLARE @DS VARCHAR(50)
DECLARE @Path VARCHAR(255)



DECLARE TLOG_BACKUP CURSOR FOR

select name from sys.databases
where recovery_model = 1 --Only return databases using the FULL recovery model
AND name NOT IN ('TEMPDB') -- Exclude TEMPDB
AND state = 0 --Exclude offline databases; they cannot be backed up while offline anyway
AND Source_database_id is null -- Exclude database snapshots

OPEN TLOG_BACKUP


FETCH NEXT FROM TLOG_BACKUP
INTO @DBName

-- Check @@FETCH_STATUS to see if there are any more rows to fetch.
WHILE @@FETCH_STATUS = 0
BEGIN
--Set the filename values of the transaction log backup files
SET @DS = REPLACE(CONVERT(VARCHAR(10), GETDATE(), 111), '/', '') + '_'
+ REPLACE(CONVERT(VARCHAR(8), GETDATE(), 108), ':', '')
SET @Path = 'C:\DATA\Backup\'
SET @Path = @path + @DBNAME + '_'+ 'LOG'+'_' + @DS + '.trn'
--Take the backup

BACKUP LOG @DBNAME
TO DISK = @Path
WITH
FORMAT, INIT, SKIP, NOREWIND,
NOUNLOAD, STATS = 10

FETCH NEXT FROM TLOG_BACKUP
INTO @DBName


END

CLOSE TLOG_BACKUP
DEALLOCATE TLOG_BACKUP
GO

SQL Script for Full Back-UP

DECLARE @name VARCHAR(50) -- database name
DECLARE @path VARCHAR(256) -- path for backup files
DECLARE @fileName VARCHAR(256) -- filename for backup
DECLARE @fileDate VARCHAR(20) -- used for file name

SET @path = 'D:\Backup\'

SELECT @fileDate = CONVERT(VARCHAR(20),GETDATE(),112)

DECLARE db_cursor CURSOR FOR
SELECT name
FROM master.dbo.sysdatabases
WHERE name NOT IN ('master','model','msdb','tempdb')

OPEN db_cursor
FETCH NEXT FROM db_cursor INTO @name

WHILE @@FETCH_STATUS = 0
BEGIN
SET @fileName = @path + @name + '_' + @fileDate + '.BAK'
BACKUP DATABASE @name TO DISK = @fileName

FETCH NEXT FROM db_cursor INTO @name
END

CLOSE db_cursor
DEALLOCATE db_cursor

Friday, October 29, 2010

How to create Logins, Users, Schemas and Database Roles

CREATE LOGIN [Hello.org] WITH PASSWORD=N' Ñèw?qÉSyDÿ8?s ! Mån¯;Ì®3¡y¹ ? ', DEFAULT_DATABASE=[master],
DEFAULT_LANGUAGE=[us_english], CHECK_EXPIRATION=OFF, CHECK_POLICY=ON
GO

ALTER LOGIN [Hello.org] DISABLE
GO


-------------------------------------------------------------

CREATE LOGIN [Hello.org_Shop] WITH PASSWORD=N'B2+ó ð?;aMÈb ÿ±il®¡ÇùÍ áLçÒ?A: ¡', DEFAULT_DATABASE=[master],
DEFAULT_LANGUAGE=[us_english], CHECK_EXPIRATION=OFF, CHECK_POLICY=ON
GO

ALTER LOGIN [Hello.org_Shop] DISABLE
GO

---------------------------------------------------------------------------



CREATE USER [Hello.org] FOR LOGIN [Hello.org] WITH DEFAULT_SCHEMA=[Hello.org_SP]
GO
-------------------------------------------------------------------------

CREATE USER [Hello.org_Shop] FOR LOGIN [Hello.org_Shop] WITH DEFAULT_SCHEMA=[Hello.org_ShopSP]
GO


-------------------------------------------------------------------------------------------------------------------------

CREATE SCHEMA [Hello.org_SP] AUTHORIZATION [dbo]
GO

CREATE SCHEMA [Hello.org_ShopSP] AUTHORIZATION [dbo]
GO


CREATE SCHEMA [Hello.org_InternalSP] AUTHORIZATION [dbo]
GO


---------------------------------------------------------------------------------------------------------------

CREATE ROLE [DBR_Hello.org] AUTHORIZATION [dbo]
GO


CREATE ROLE [DBR_Hello.org_Shop] AUTHORIZATION [dbo]
GO

Wednesday, October 27, 2010

Authenticate Member SP

ALTER PROCEDURE [The].[prc_MemberAuthenticate]
@chvEmail varchar(128),
@chvnPassword nvarchar(100),
@ReturnCode int OUT
AS
DECLARE @chvnMemberPassword NVARCHAR(128),
@chvMemberNo varchar (9),
@chvEmailDB varchar(128),
@IsPaid bit


SET @chvMemberNo = ''
SET @IsPaid = 0

BEGIN
SELECT @IsPaid = IsPaid,@chvMemberNo = MemberID, @chvnMemberPassword = [Password], @chvEmailDB = Email
FROM The.Memberships WHERE Email = @chvEmail and IsPaid = 1

IF (@IsPaid = 0)
BEGIN
SET @returnCode = -3 --Unpaid
END
ELSE IF (@chvEmailDB is null OR @chvEmailDB = '')
BEGIN
SET @returnCode = -1 --Wrong UserName
END
ELSE IF (@chvnPassword <> @chvnMemberPassword)
BEGIN
SET @returnCode = -2 --Wrong Password
END
--ELSE IF (@var_Status=0)
-- BEGIN
-- SET @returnCode = -3 -- Not active
-- END
ELSE
BEGIN
SET @returnCode = 1 -- Login successfully
END

IF (@returnCode = 1)
BEGIN
SELECT MemberID,MemberNo, (FirstName + ' ' + LastName) as Name
FROM The.Memberships
WHERE Email = @chvEmail
END
END
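
A minimal sketch of how a caller might invoke this procedure and read the status code; the email and password values are placeholders.

-- Hypothetical sample call (credentials are placeholders)
DECLARE @ReturnCode INT
EXEC [The].[prc_MemberAuthenticate]
@chvEmail = 'member@example.com',
@chvnPassword = N'P@ssw0rd',
@ReturnCode = @ReturnCode OUTPUT
SELECT @ReturnCode AS AuthStatus -- 1 = success, -1 = wrong username, -2 = wrong password, -3 = unpaid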

Monday, September 27, 2010

Trigger to avoid drop / alter tables by various logins

CREATE TRIGGER [Security]
ON DATABASE
FOR ALTER_TABLE, ALTER_PROCEDURE, DROP_TABLE, DROP_PROCEDURE, ALTER_TRIGGER, DROP_TRIGGER
AS
IF SYSTEM_USER NOT IN ('YadavA', 'BhandariD','groverv','mittals')
BEGIN
RAISERROR ('You do not have the necessary permissions to perform this action! Contact the DBA for further help', 10, 1)
ROLLBACK
END

GO

Thursday, September 23, 2010

Move SQL Server transaction log files to a different location via T-SQL and SSMS

Example - T-SQL Commands

In this example, we will work through the steps to move a transaction log file to a new location via T-SQL commands. The first script will return the current locations, size, etc. of the database and transaction log files.

Script # 1: Capture database and transaction log file information
USE AdventureWorks
GO

sp_helpfile
GO


The script's output shows that this database has only two files: the primary data file and the transaction log file. Although the file names, sizes, etc. may differ on your SQL Server, you will know the exact location of the files before the transaction log move begins. Note the location of the data file, since it will be used during the database attach process.



Once you have the location information and have negotiated downtime with your users, it is time to get exclusive access to the database so it can be detached. If users are still connected to the database during the downtime, you can disconnect them by using the WITH ROLLBACK IMMEDIATE option, as shown below, or you can kill the connections manually.
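
As a quick sketch of that option (Script #2 below uses the plain SINGLE_USER switch; this variant additionally rolls back any open transactions and drops other connections immediately):

-- Force exclusive access, rolling back any open transactions
ALTER DATABASE AdventureWorks
SET SINGLE_USER
WITH ROLLBACK IMMEDIATE
GO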

Script # 2: Set database to single user mode and detach database
Use MASTER
GO

-- Set database to single user mode
ALTER DATABASE adventureWorks
SET SINGLE_USER
GO

-- Detach the database
sp_detach_db 'AdventureWorks'
GO


Now the database is detached. Once the detach process completes, copy the transaction log file to its new location and then delete the old transaction log file via Windows Explorer. Once this is done, we can attach the database with the log file at its new location using the following script:

Script # 3: Attach database with log file at new location
USE master
GO

-- Now Attach the database
sp_attach_DB 'AdventureWorks',
'D:\Program Files\Microsoft SQL Server\MSSQL\Data\AdventureWorks_Data.mdf',
'E:\Move LogFile here through T-SQL\AdventureWorks_Log.ldf'
GO



After the final attach command, the transaction log file has been moved and the database is operational with the log file in its new location. You can verify the new transaction log location by re-running script #1 above.

Key Questions for --- SQL Server Disaster Recovery Planning and Testing

Key Questions

The first step in testing your disaster recovery plan is to ask yourself and your team some pointed questions and respond in simple terms, maybe even one-word answers. How you and your team answer these questions dictates how the fire drill can be conducted.

SLA - What is your Service Level Agreement (SLA) with the business or your customers?
Cost - What is the tangible cost of downtime for your users, application, business, etc?
Prevention - What type of disaster are you trying to prevent?
Recovery - What type of disaster are you trying to recover from?
Time - How much time and money do you have to spend on building, testing and maintaining the disaster recovery plan on a daily, weekly, monthly or quarterly basis?
Priority - Where does the disaster recovery plan fit into the organizational, departmental and personal priority list?
Need - What are the true disaster recovery risks for your organization based on where you are located, your line of business, your affiliations, etc?
Responsibility - What are your disaster recovery responsibilities and why do you have those responsibilities?
Plan - Do you have a documented disaster recovery plan?
Testing - Have you tested the disaster recovery plan?
Documentation - Do you update your documentation as dependent objects change? Or monthly? Or quarterly?
People - Will you have people to support the business?
Power - Will you have power to support the business?
Access - Will you be able to get to the office or make a remote connection?
Process - Will you have a process that everyone on the team can follow?
Technology - Have you invested in technology that will improve the prevention or recovery from a disaster?
Dependencies - Are you and your team dependent on other teams or external entities in order for your applications to operate properly? Do those teams test their plans on a regular basis?
Mitigation - Have you put multiple lines of defenses up to prevent a disaster as well as recover from one?
Limits - How long can your business run without the IT infrastructure and applications?
Alternatives - Can the business run on paper based operations for a finite period of time?
Experience - If you have an entire backup site\infrastructure have you failed over to it during a low usage period such as a Saturday or Sunday so the business really knows how the applications will perform even with a limited number of users?
Impacts - If you have an extended downtime, what will that do to your business in terms of customer loyalty, industry reputation, cash flow, etc?

SQL Server Monitoring Scripts with the DMVs

Problem
As a SQL Server DBA, dedicated to monitoring and administering many busy OLTP production environments, I always find myself trying to answer questions like: What was SQL Server doing last night when the end users were experiencing slow application response times? Was the issue with SQL Server, or was it a network or an application issue? The answers to such questions are never easy to identify. If only I knew what SQL Server was doing at that specific point in time, it might help me understand whether the issue was a SQL Server performance problem or not. So how can I figure this out? If you do not have a third-party monitoring tool in place, the only way is to constantly monitor SQL Server. To do this without being overly intrusive, we need to rely on the SQL Server DMVs.

Solution

This tip briefly describes how to combine two SQL Server DMVs (sys.dm_exec_requests and sys.dm_exec_sessions) to create a rudimentary, non-intrusive and very efficient tool to monitor requests that are being executed against a SQL Server 2005 or SQL Server 2008 instance. These are the only two DMVs needed for this T-SQL based monitoring script. It is not my intention to explain the details of those two DMVs here; I will only combine them to show what useful information comes from them.

The following script comes in handy because it captures the code the SQL Server engine is processing at any point in time. Here is that query:

SELECT T.[text], P.[query_plan], S.[program_name], S.[host_name],
S.[client_interface_name], S.[login_name], R.*
FROM sys.dm_exec_requests R
INNER JOIN sys.dm_exec_sessions S
ON S.session_id = R.session_id
CROSS APPLY sys.dm_exec_sql_text(sql_handle) AS T
CROSS APPLY sys.dm_exec_query_plan(plan_handle) As P
GO

In order to create our simple monitoring tool, let's create the following monitoring table. Here is the script:

CREATE TABLE [dbo].[MyMonitorTable](
[text] [nvarchar](max) NULL,
[query_plan] [xml] NULL,
[host_name] [nvarchar](128) NULL,
[program_name] [nvarchar](128) NULL,
[client_interface_name] [nvarchar](32) NULL,
[login_name] [nvarchar](128) NOT NULL,
[session_id] [smallint] NOT NULL,
[request_id] [int] NOT NULL,
[start_time] [datetime] NOT NULL,
[status] [nvarchar](30) NOT NULL,
[command] [nvarchar](16) NOT NULL,
[sql_handle] [varbinary](64) NULL,
[statement_start_offset] [int] NULL,
[statement_end_offset] [int] NULL,
[plan_handle] [varbinary](64) NULL,
[database_id] [smallint] NOT NULL,
[user_id] [int] NOT NULL,
[connection_id] [uniqueidentifier] NULL,
[blocking_session_id] [smallint] NULL,
[wait_type] [nvarchar](60) NULL,
[wait_time] [int] NOT NULL,
[last_wait_type] [nvarchar](60) NOT NULL,
[wait_resource] [nvarchar](256) NOT NULL,
[open_transaction_count] [int] NOT NULL,
[open_resultset_count] [int] NOT NULL,
[transaction_id] [bigint] NOT NULL,
[context_info] [varbinary](128) NULL,
[percent_complete] [real] NOT NULL,
[estimated_completion_time] [bigint] NOT NULL,
[cpu_time] [int] NOT NULL,
[total_elapsed_time] [int] NOT NULL,
[scheduler_id] [int] NULL,
[task_address] [varbinary](8) NULL,
[reads] [bigint] NOT NULL,
[writes] [bigint] NOT NULL,
[logical_reads] [bigint] NOT NULL,
[text_size] [int] NOT NULL,
[language] [nvarchar](128) NULL,
[date_format] [nvarchar](3) NULL,
[date_first] [smallint] NOT NULL,
[quoted_identifier] [bit] NOT NULL,
[arithabort] [bit] NOT NULL,
[ansi_null_dflt_on] [bit] NOT NULL,
[ansi_defaults] [bit] NOT NULL,
[ansi_warnings] [bit] NOT NULL,
[ansi_padding] [bit] NOT NULL,
[ansi_nulls] [bit] NOT NULL,
[concat_null_yields_null] [bit] NOT NULL,
[transaction_isolation_level] [smallint] NOT NULL,
[lock_timeout] [int] NOT NULL,
[deadlock_priority] [int] NOT NULL,
[row_count] [bigint] NOT NULL,
[prev_error] [int] NOT NULL,
[nest_level] [int] NOT NULL,
[granted_query_memory] [int] NOT NULL,
[executing_managed_code] [bit] NOT NULL,
[group_id] [int] NOT NULL,
[query_hash] [binary](8) NULL,
[query_plan_hash] [binary](8) NULL
)
GO

In order to monitor the SQL Server activity in an automated manner, create a SQL Server Agent Job that executes the code below every minute.

INSERT INTO MyMonitorTable
SELECT T.text, P.query_plan, S.host_name, S.program_name, S.client_interface_name, S.login_name, R.*
FROM sys.dm_exec_requests R
JOIN sys.dm_exec_sessions S on S.session_id=R.session_id
CROSS APPLY sys.dm_exec_sql_text(sql_handle) AS T
CROSS APPLY sys.dm_exec_query_plan(plan_handle) As P
GO

After the job has been running for hours or days, we can query the MyMonitorTable table to see which SQL Server statements have been captured, for example:
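
A simple ad-hoc query along these lines works well; the time window and wait filters below are just an illustration.

-- Hypothetical example: captured requests from the last 24 hours that waited or were blocked
SELECT start_time, session_id, blocking_session_id, wait_type, wait_time,
[host_name], [program_name], [login_name], [text]
FROM dbo.MyMonitorTable
WHERE start_time >= DATEADD(hour, -24, GETDATE())
AND (blocking_session_id <> 0 OR wait_time > 1000)
ORDER BY start_time DESC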




Script Caveats

It is important to clarify that this simple monitoring script does not capture all transactions like SQL Server Profiler does. This script, with the SQL Server Agent Job, only samples SQL Server activity once a minute, so you will not capture every query. It is aimed at capturing queries that are long running, may cause blocking, or run frequently.

Here are also some columns to focus on as you begin to use the script:

For preliminary troubleshooting check out the following columns:
host_name
program_name
database_id
user_id
reads
writes
wait_type
wait_time
last_wait_type
wait_resource
The query_plan column shows (in XML format) the execution plan that a specific statement used. A graphical plan representation is also available if you click on the XML plan.
The blocking_session_id column shows which session is currently blocking a statement, and the wait_type shows what it is waiting on.
The text column shows which query was executed.
The statement_start_offset and statement_end_offset columns can be used to locate the exact T-SQL statement that was executing within the batch at that specific point in time, as sketched below.
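
A commonly used pattern for carving the active statement out of the captured batch text with those offsets is shown below; it is a sketch against the MyMonitorTable columns defined above (the offsets are byte positions into Unicode text, hence the division by 2, and -1 means "to the end of the batch").

-- Extract the individual statement from the captured batch text using the offsets
SELECT session_id, start_time,
SUBSTRING([text], statement_start_offset / 2 + 1,
(CASE WHEN statement_end_offset = -1
THEN DATALENGTH([text])
ELSE statement_end_offset
END - statement_start_offset) / 2 + 1) AS statement_text
FROM dbo.MyMonitorTable
WHERE statement_start_offset IS NOT NULL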

SQL Server Transaction Log Grows And Fills Up Drive

Problem
I had a production problem recently: the transaction log grew 1 GB every minute and filled up the entire drive. The error message I got was: "The transaction log for database 'MyDB' is full. To find out why space in the log cannot be reused, see the log_reuse_wait_desc column in sys.databases." My SQL Server is clustered, and the issue happened after a node failover occurred. In this tip I cover the steps that I took to resolve this problem.

Solution
The message I got was that the transaction log drive was full and users were unable to access the application; DBAs never want to hear this! The usual DBA question is "what changed?"

My first priority was to fix the problem immediately so users could access the application, and then do root cause analysis. For the database in question, I changed the recovery model to SIMPLE. Then I shrank the transaction log file to a reasonable size (note: you can't shrink it below its original size). After these changes the log growth stopped, but this still did not tell me what the issue was. Something to note: changing the recovery model to SIMPLE is not possible if mirroring or log shipping is set up, unless you remove these first; they can be reconfigured later. The important thing is to stop the bleeding ASAP.

Other options would have been to create another transaction log file on a different drive that had plenty of space or to clean up files on the drive that was running out of space. But in my case the file was growing so rapidly that I needed to change the recovery model so the growth would stop until this issue was resolved.

Below are the steps that I took to check the size, change the recovery model and shrink the log file.

SELECT recovery_model_desc FROM sys.databases WHERE name = 'MyDB' --Before

ALTER DATABASE MyDB SET recovery simple

SELECT recovery_model_desc FROM sys.databases WHERE name = 'MyDB' --After

EXEC xp_fixeddrives --Check free drive space

EXEC sp_helpdb MyDB -- Note the size of the log before shrink

DBCC shrinkfile(MyDB_log, 1024) -- shrink log to 1 GB

EXEC sp_helpdb MyDB -- Note the size of the log after shrink

EXEC xp_fixeddrives -- Check free drive space


First aid is complete, database is up and users are able to use the application again.

Now I need to find out what was causing the log to grow so fast, fix the issue, put the database back in FULL recovery mode, do a full backup and make sure my log backups are working correctly.

The drive now has enough space. I put the database in Full recovery mode again to see if I could catch the offending transaction and I monitored drive space and transaction log size. I immediately noticed the transaction log growing about 1 GB per minute. I then executed the block of code below to find out what may be causing the log growth.

DBCC opentran --get the spid and feed it into the next query

DECLARE @handle BINARY(20)
DECLARE @SPID INT
SET @SPID = 100 -- 100 was the spid from the dbcc opentran

SELECT @handle = sql_handle
FROM MASTER..sysprocesses
WHERE spid = @SPID
SELECT [text] FROM ::fn_get_sql(@handle)
GO

The culprit was a stored procedure that was showing up every time I executed the above block of code, although the SPID changed each time. I looked into the stored procedure, which was not very complicated but did a bunch of deletes and inserts based on some condition. This procedure was called by an automated process every minute. What happens when a cluster node failover occurs? The plan cache is flushed and all the query plans are gone. Since there had not been any issue with this procedure in the past, my first option was to recompile the procedure. Voila, it worked! The transaction log stopped growing so fast and I no longer saw this procedure in the open transactions from the above code. So this simple stored procedure that had always worked fine suddenly caused a major production issue that shut the application down, and the fix was as simple as a recompile. Quite interesting.
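
For reference, marking a procedure for recompilation is a one-liner; the procedure name below is a placeholder, not the actual procedure from this incident.

-- Force a fresh query plan the next time the procedure runs (name is a placeholder)
EXEC sp_recompile 'dbo.MyOffendingProcedure'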

My next step was to start a full database backup ASAP. While the full database backup was running, I kicked off transaction log backups every 5 minutes and they seemed to work fine, so I changed the transaction log backup back to its normal schedule. After the full backup completed, I reviewed the SQL Server error log, drive space, transaction log size and open transactions, and everything seemed fine. If it is a cluster, you could try failing back to the other node and see if that fixes the problem, which in my case was the need for a recompile. In a cluster the storage is shared by the nodes, so there is no benefit from a storage point of view.

Tuesday, August 31, 2010

SQL SERVER – Delete Backup History – Cleanup Backup History

SQL Server keeps the history of every backup ever taken; this history is stored in the msdb database. Often the older history is no longer required. The following stored procedure can be executed with a cutoff date parameter; any backup history older than that date is deleted. In the example below, everything older than 30 days (roughly a month of history) is removed.
USE msdb
GO
DECLARE @DaysToKeepHistory DATETIME
SET @DaysToKeepHistory = CONVERT(VARCHAR(10), DATEADD(dd, -30, GETDATE()), 101)
EXEC sp_delete_backuphistory @DaysToKeepHistory
GO

Tuesday, July 27, 2010

How to Recover from a Corrupted Database in SQL Server 2000

Make sure that the database being restored is at the same product level as the server you are restoring to. I once faced an issue when trying to restore a database from an SP4 server to a non-SP4 server.

1. Detach the corrupted MDF from Enterprise Manager.

2. Save the corrupted MDF to a safe location.

3. Now create a new database with the same name (the files must be created in the same location as the corrupted MDF and LDF).

4. Stop the SQL Server service.

5. Now replace this MDF and LDF (new database) with the “corrupted MDF and LDF”.

6. Start SQL Server.

7. Launch Enterprise Manager. Now you should be able to see the database in suspect mode.

8. Launch Query Analyzer and select the master database. Issue the following commands in this order:
sp_configure 'allow updates', 1
go

reconfigure with override
go

9. You can now make changes to the system catalog tables.

10. Now to check the status of our database. Issue the following command

select status from sysdatabases where name = 'your database name' -- replace with your database name

11. Now execute the following command:
update sysdatabases
set status = 32768 where name = 'your database name'

12. Next restart SQL Server Service.

13. You will now be in Emergency Mode.

14. Now create a destination recovery database, called dbrecover and try to push data from the corrupt database to the new dbrecover database using DTS.

15. Now issue the following undocumented command:
DBCC rebuild_log ('your database name', 'new log filename with path')

16. After this successful execution, you will need to get into your database, so issue the following commands:
use master
go

sp_dboption 'your database name', 'single_user', 'true'
go

DBCC checkdb('your database name', repair_allow_data_loss)
go

17. Finally set your database back to a normal status by issuing the following command:
use master
go

update sysdatabases
set status = 0 where name = 'your database name'
go

18. Now you can see that your database is back online!

19. As a precautionary measure, please disable updates to the system catalogs immediately by issuing the following command:
use master
go

sp_configure 'allow updates', 0
go

Reconfigure with override
go

This is how one recovers a corrupted mdf database and brings it back online.


Tuesday, July 20, 2010

Rename Database

ALTER DATABASE yafnet SET SINGLE_USER WITH ROLLBACK IMMEDIATE
GO


EXEC sp_renamedb 'yafnet', 'BballersForum'


ALTER DATABASE BballersForum SET MULTI_USER
GO

Friday, July 16, 2010

If you are having difficulty tuning the performance of a poorly performing query that has one or more JOINs,

check to see if the query plan created by the query optimizer is using a hash join. When the query optimizer is asked to join two tables that don't have appropriate indexes, it will often perform a hash join.

A hash join is resource intensive (especially CPU and I/O) and can slow the performance of your join. If the query in question is run often, you should consider adding appropriate indexes. For example, if you are joining column1 in table1 to column5 in table2, then column1 in table1 and column5 in table2 need to have indexes.

Once indexes are added to the appropriate columns used in the joins in your query, the query optimizer will most likely be able to use these indexes, performing a nested-loop join instead of a hash join, and performance will improve.
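
To make that concrete, the indexes in question would look something like this; the table and column names are the hypothetical ones from the example above.

-- Hypothetical indexes on the join columns from the example above
CREATE NONCLUSTERED INDEX IX_table1_column1 ON dbo.table1 (column1);
CREATE NONCLUSTERED INDEX IX_table2_column5 ON dbo.table2 (column5);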

Tips for Rebuilding Indexes

Periodically (daily, weekly, or monthly) perform a database reorganization on all the indexes on all the tables in your database. This will rebuild the indexes so that the data is no longer fragmented. Fragmented data can cause SQL Server to perform unnecessary data reads, slowing down SQL Server's performance. It will also update index statistics.

If you do a reorganization on a table with a clustered index, any non-clustered indexes on that same table will automatically be rebuilt.

Database reorganizations can be done using the Maintenance Wizard, or by running your own custom script via the SQL Server Agent (see below).

The DBCC DBREINDEX command will not automatically rebuild all of the indexes on all the tables in a database; it can only work on one table at a time. But if you run the following script, you can reindex all the tables in a database with ease:

--Script to automatically reindex all tables in a database

USE DatabaseName --Enter the name of the database you want to reindex

DECLARE @TableName varchar(255)

DECLARE TableCursor CURSOR FOR
SELECT table_name FROM information_schema.tables
WHERE table_type = 'base table'

OPEN TableCursor

FETCH NEXT FROM TableCursor INTO @TableName
WHILE @@FETCH_STATUS = 0
BEGIN
DBCC DBREINDEX(@TableName,' ',90)
FETCH NEXT FROM TableCursor INTO @TableName
END

CLOSE TableCursor

DEALLOCATE TableCursor

The script will automatically reindex every index in every table of any database you select, and provide a fill factor of 90%. You can substitute any number appropriate for the fill factor in the above script.

When DBCC DBREINDEX is used to rebuild indexes, keep in mind that as the indexes on a specific table are being rebuilt, the table becomes unavailable to your users.

For example, when a non-clustered index is rebuilt, a shared table lock is put on the table, preventing all but SELECT operations from being performed on it. When a clustered index is rebuilt, an exclusive table lock is put on the table, preventing any table access by your users. Because of this, you should only run this command when users don't need access to the tables being reorganized. [7.0, 2000, 2005] Updated 7-24-2006

*****

When you create or rebuild an index, you can specify a fill factor, which is how full the data pages in the index are filled. A fill factor of 100 means that each index page is 100% full; a fill factor of 50 means each index page is 50% full. If you create a clustered index that has a fill factor of 100 and it is not based on a monotonically increasing key, then each time a record is inserted (or perhaps updated), page splits may occur because there is no room for the data in the existing pages. Numerous page splits can slow down SQL Server's performance.

Here's an example: Assume that you have just created a new index on a table with the default fill factor. When SQL Server creates the index, it places the index on contiguous physical pages, which allows optimal I/O access because the data can be read sequentially. But as the table grows and changes with INSERTs, UPDATEs, and DELETEs, page splitting occurs. When pages split, SQL Server must allocate new pages elsewhere on the disk, and these new pages are not contiguous with the original physical pages. Because of this, random I/O rather than sequential I/O must be used to access the index pages, which is much slower.

So what is the ideal fill factor? It depends on the ratio of reads to writes that your application makes to your SQL Server tables. As a rule of thumb, follow these guidelines:

* Low Update Tables (100-1 read to write ratio): 100% fill factor
* High Update Tables (where writes exceed reads): 50%-70% fill factor
* Everything In-Between: 80%-90% fill factor.
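
To apply one of these guidelines, the fill factor is set per index when it is created or rebuilt. A minimal sketch using SQL Server 2005+ syntax, with a made-up table and a 70% fill factor for a high-update case:

-- Hypothetical example: a heavily updated table gets a lower fill factor
CREATE NONCLUSTERED INDEX IX_Orders_CustomerID
ON dbo.Orders (CustomerID)
WITH (FILLFACTOR = 70);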

You may have to experiment to find the optimum fill factor for your particular application. Don't assume that a low fill factor is always better than a high fill factor. While page splits will be reduced with a low fill factor, it also increases the number of pages that have to be read by SQL Server during queries, which reduces performance. And not only is I/O overhead increased with too low a fill factor, it also affects your buffer cache. As data pages are moved from disk into the buffer, the entire page (including empty space) is moved to the buffer. So the lower the fill factor, the more pages have to be moved into SQL Server's buffer, which means there is less room for other important data pages to reside at the same time, which can reduce performance.

If you don't specify a fill factor, the default fill factor is 0, which means the same as a 100% fill factor (the leaf pages of the index are filled 100%, but there is some room left on intermediate index pages). In most cases, this default value is not a good choice, especially for clustered indexes. [6.5, 7.0, 2000, 2005] Updated 7-24-2006

*****

If you find that your transaction log grows to an unacceptable size when you run DBCC DBREINDEX, you can minimize this growth by switching from the Full recovery model to the Bulk-Logged model before you reindex, and when done, switch back. This will significantly reduce the size of the transaction log growth. [2000, 2005] Updated 7-24-2006
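
A minimal sketch of that switch, assuming a database named MyDB and a placeholder backup path; note that point-in-time restore is limited for any log backup interval that contains bulk-logged operations, so take a log backup once you are back in Full recovery.

-- Switch to bulk-logged recovery for the reindex, then switch back
ALTER DATABASE MyDB SET RECOVERY BULK_LOGGED
GO
-- ... run the reindex script here ...
ALTER DATABASE MyDB SET RECOVERY FULL
GO
-- Back up the log afterwards so the log chain continues under Full recovery
BACKUP LOG MyDB TO DISK = 'D:\Backup\MyDB_log.trn'
GO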

*****

If you have a table that has a clustered index on a monotonically increasing or decreasing primary key, and the table is not subject to UPDATEs or has no VARCHAR columns, then the ideal fill factor for the table is 100. This is because such a table will normally not experience any page splits. Because of this, there is no point in leaving any room in the index for page splits. And because the fill factor is 100, SQL Server will require fewer I/Os to read the data in the table, and performance will be boosted. [7.0, 2000, 2005] Updated 7-24-2006

*****

If you are not sure what to make the fill factor for your indexes, your first step is to determine the ratio of disk writes to reads. The way to do this is to use these two counters: Physical Disk Object: % Disk Read Time and Physical Disk Object: % Disk Write Time. When you run both counters on an array, you should get a good feel for what percentage of your I/O is reads versus writes. You will want to run this over a period of time representative of your typical server load. If your percentage of writes greatly exceeds the percentage of reads, then a lower fill factor is called for. If your percentage of reads greatly exceeds the percentage of writes, then a higher fill factor is called for.

Another Performance Monitor counter you can use to help you select the ideal fill factor for your environment is SQL Server: Access Methods: Page Splits/sec. This counter measures the number of page splits that are occurring in SQL Server every second. For best performance, you will want this counter to be as low as possible, as page splits incur extra server overhead, hurting performance. If this number is relatively high, then you may need to lower the fill factor in order to prevent new page splits. If this counter is very low, then the fill factor you have is fine, or it could be a little too low. You won't know unless you increase the fill factor and watch the results.

Ideally, you want a fill factor that prevents excessive page splits, but not so low as to increase the size of the database, which in turn can reduce read performance because of all the extra data pages that need to be read.

Once you know the ratio of disk write to reads, you now have the information you need to help you determine an optimum fill factor for your indexes. [6.5, 7.0, 2000, 2005] Updated 7-24-2006
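
If you would rather check the page split counter from T-SQL than from Performance Monitor, the same value is exposed through sys.dm_os_performance_counters; the counter is cumulative since the instance started, so sample it twice and compare the values to get a per-second rate.

-- Page Splits/sec as exposed through the DMV (cumulative value since instance startup)
SELECT [object_name], [counter_name], [cntr_value]
FROM sys.dm_os_performance_counters
WHERE [object_name] LIKE '%Access Methods%'
AND [counter_name] = 'Page Splits/sec'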

*****

If you want to determine the level of fragmentation of your indexes due to page splitting, you can run the DBCC SHOWCONTIG command. Since this command requires you to know the ID of both the table and index being analyzed, you may want to run the following script:

--Script to identify table fragmentation

--Declare variables
DECLARE
@ID int,
@IndexID int,
@IndexName varchar(128)

--Set the table and index to be examined
SELECT @IndexName = 'index_name' --enter name of index
SET @ID = OBJECT_ID('table_name') --enter name of table

--Get the Index Values
SELECT @IndexID = IndID
FROM sysindexes
WHERE id = @ID AND name = @IndexName

--Display the fragmentation
DBCC SHOWCONTIG (@id, @IndexID)

While the DBCC SHOWCONTIG command provides several measurements, the key one is Scan Density. This figure should be as close to 100% as possible. If the scan density is less than 75%, then you may want to reindex the tables in your database, and also increase the fill factor if you are finding that the current fill factor you are using is not appropriate. [6.5, 7.0, 2000] Updated 7-24-2006

*****

Here's a script that is used to create DBCC SHOWCONTIG commands for all of the indexes in one or more tables. Once you run this script, it will produce a DBCC SHOWCONTIG statement for each index, which you can then run to find out about the level of fragmentation of your indexes. This script is especially handy if you don't know the names of the indexes in your tables (which is most of the time).

SELECT 'dbcc showcontig (' +
CONVERT(varchar(20),i.id) + ',' + -- table id
CONVERT(varchar(20),i.indid) + ') -- ' + -- index id
object_name(i.id) + '.' + -- table name
i.name -- index name
from sysobjects o
inner join sysindexes i
on (o.id = i.id)
where o.type = 'U'
and i.indid < 2
and
i.id = object_id(o.name)
ORDER BY
object_name(i.id), i.indid

Once you run this script, the output will be DBCC SHOWCONTIG statements for each of the table(s) and index(es). This output can then be cut and pasted into Query Analyzer or Management Studio and run, which produces a DBCC SHOWCONTIG result for every index for every table you specified. [7.0, 2000, 2005] Updated 11-1-2005 Tip contributed by Jeff M. Belina and Steven R. Morrow

Saturday, July 10, 2010

CREATE SQL SERVER SCHEMA

-------------------------------------------------------
CREATE SCHEMA Testing AUTHORIZATION dbo;
GO
CREATE SCHEMA TestingSP AUTHORIZATION dbo;

-------------------------------------------------------
CREATE LOGIN Testing
WITH PASSWORD = 'Testing@123';
USE MarketShiksha;
CREATE USER Testing FOR LOGIN Testing;
GO
---------------------------------------------------------
ALTER SCHEMA

-- Generate ALTER SCHEMA ... TRANSFER statements for every user table; run the generated output to move the tables
SELECT 'ALTER SCHEMA Testing TRANSFER ' + SCHEMA_NAME(o.[schema_id]) + '.' + o.[name]
FROM sys.objects o WHERE o.[type] = 'U';
-------------------------------------------------------------------

SQL Server Paging Stored Procedure

CREATE PROCEDURE prc_MediaGallery_SELECT_All
@insPageNo AS INT,
@insPageSize AS INT,
@intTotalCount AS INT out
AS
BEGIN

SET NOCOUNT ON;

DECLARE @intPageCount INT

BEGIN TRY
SELECT @intTotalCount = COUNT(MgId)
FROM TheSikhTV.MediaGallery

-- Work out the total number of pages and clamp the requested page number to the last page
SET @intPageCount = @intTotalCount/@insPageSize;
IF (@intTotalCount%@insPageSize<>0)
SET @intPageCount= @intPageCount+1;
IF (@insPageNo>@intPageCount)
SET @insPageNo=@intPageCount;

SELECT TOP(@insPageSize) *
FROM (
SELECT ROW_NUMBER() OVER ( ORDER BY CreatedDate DESC) AS RowNum,
MgId,MgImageName,MgImagePath,CONVERT (Varchar (20),CreatedDate,106)as CreatedDate1,ModifiedDate
FROM TheSikhTV.MediaGallery
) A
WHERE A.RowNum > (@insPageSize * (@insPageNo - 1));

RETURN 1;
END TRY
BEGIN CATCH
RETURN -1;
END CATCH

END
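
A call might look like the following; the page number and page size are sample values, and the total row count comes back through the output parameter.

-- Hypothetical sample call: fetch page 2 with 10 rows per page
DECLARE @TotalCount INT
EXEC prc_MediaGallery_SELECT_All
@insPageNo = 2,
@insPageSize = 10,
@intTotalCount = @TotalCount OUTPUT
SELECT @TotalCount AS TotalRows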