TW_Q_LENGTH
sizeof(TW_Command_Full) * TW_Q_LENGTH,
TW_SECTOR_SIZE * TW_Q_LENGTH,
tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
for (i = 0; i < TW_Q_LENGTH; i++) {
tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
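The free and pending queues updated above are fixed-size circular buffers: each head and tail index advances by one and wraps with a modulo so it always stays in [0, TW_Q_LENGTH). A minimal standalone sketch of the pattern, assuming an illustrative TW_Q_LENGTH value and a simplified queue struct (demo_queue, ring_next, and main are hypothetical, not driver code):

#include <stdio.h>

#define TW_Q_LENGTH 256	/* illustrative; the driver headers define the real value */

struct demo_queue {
	unsigned char slots[TW_Q_LENGTH];
	int head;	/* next slot to take from */
	int tail;	/* next slot to fill */
};

/* Advance a ring index by one slot, wrapping at the end. */
static int ring_next(int index)
{
	return (index + 1) % TW_Q_LENGTH;
}

static void queue_put(struct demo_queue *q, unsigned char request_id)
{
	q->slots[q->tail] = request_id;
	q->tail = ring_next(q->tail);	/* as with free_tail/pending_tail above */
}

static unsigned char queue_get(struct demo_queue *q)
{
	unsigned char request_id = q->slots[q->head];

	q->head = ring_next(q->head);	/* as with free_head/pending_head above */
	return request_id;
}

int main(void)
{
	struct demo_queue q = { .head = 0, .tail = 0 };

	queue_put(&q, 7);
	printf("got request id %u\n", queue_get(&q));
	return 0;
}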
for (i = 0; i < TW_Q_LENGTH; i++) {
for (i = 0; i < TW_Q_LENGTH; i++) {
.can_queue = TW_Q_LENGTH - 2,
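The .can_queue initializer above advertises two fewer tags to the SCSI midlayer than the ring actually holds, presumably so the driver always has spare request slots for its own internal commands. A hedged sketch of the host-template context this field lives in (demo_queuecommand and the other field values are illustrative; only the .can_queue arithmetic mirrors the drivers):

#include <linux/module.h>
#include <scsi/scsi_host.h>

#define TW_Q_LENGTH 256	/* illustrative; the driver headers define the real value */

/* Hypothetical queuecommand stub, present only to complete the template. */
static int demo_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	return 0;
}

static struct scsi_host_template demo_template = {
	.module		= THIS_MODULE,
	.name		= "demo",
	.queuecommand	= demo_queuecommand,
	/* Two fewer tags than the ring holds, matching the drivers above. */
	.can_queue	= TW_Q_LENGTH - 2,
	.this_id	= -1,
};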
if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
tw_dev->error_index = (tw_dev->error_index + 1) % TW_Q_LENGTH;
size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
memset(cpu_addr, 0, size * TW_Q_LENGTH);
for (i = 0; i < TW_Q_LENGTH; i++) {
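The allocation lines above grab a single coherent DMA region sized for the whole ring (size * TW_Q_LENGTH) and then loop over the TW_Q_LENGTH slots to carve it up per request. A sketch of that carve-up, assuming simplified names (demo_alloc_ring and slot_size are hypothetical; dma_alloc_coherent is the real API, and since it returns zeroed memory on current kernels the sketch drops the driver's memset):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define TW_Q_LENGTH 256	/* illustrative; the driver headers define the real value */

/* Allocate one coherent region and hand out per-request slices of it. */
static int demo_alloc_ring(struct device *dev, size_t slot_size,
			   void *virt[TW_Q_LENGTH],
			   dma_addr_t phys[TW_Q_LENGTH])
{
	dma_addr_t dma_handle;
	void *cpu_addr;
	int i;

	cpu_addr = dma_alloc_coherent(dev, slot_size * TW_Q_LENGTH,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* dma_alloc_coherent() zeroes the buffer, so no memset is needed. */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		virt[i] = cpu_addr + i * slot_size;
		phys[i] = dma_handle + i * slot_size;
	}
	return 0;
}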
event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
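The three event_index computations above are modular arithmetic on the event ring, and the + TW_Q_LENGTH bias in the first one is load-bearing: C's % operator takes the sign of its dividend, so (error_index - 1) % TW_Q_LENGTH would yield -1 when error_index is 0. A standalone check of the two behaviors (values are illustrative):

#include <stdio.h>

#define TW_Q_LENGTH 256	/* illustrative; the driver headers define the real value */

int main(void)
{
	int error_index = 0;

	/* Naive wrap: C's % keeps the dividend's sign, so this prints -1. */
	printf("naive:  %d\n", (error_index - 1) % TW_Q_LENGTH);

	/* Driver-style wrap: bias by TW_Q_LENGTH first, so this prints 255. */
	printf("biased: %d\n", (error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH);
	return 0;
}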
unsigned long *generic_buffer_virt[TW_Q_LENGTH];
dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
dma_addr_t command_packet_phys[TW_Q_LENGTH];
struct scsi_cmnd *srb[TW_Q_LENGTH];
unsigned char free_queue[TW_Q_LENGTH];
unsigned char pending_queue[TW_Q_LENGTH];
int state[TW_Q_LENGTH];
TW_Event *event_queue[TW_Q_LENGTH];
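The fields above (each driver's device extension repeats the pattern with minor variations) are parallel per-request arrays: index i of every array belongs to the same outstanding command, and unused request ids circulate through free_queue. A trimmed sketch of the idea with simplified stand-in types (demo_dev and demo_claim_slot are hypothetical):

#define TW_Q_LENGTH 256	/* illustrative; the driver headers define the real value */

/*
 * Parallel per-request arrays: everything belonging to request id i
 * lives at index i of each array. Types are simplified stand-ins.
 */
struct demo_dev {
	void *command_packet_virt[TW_Q_LENGTH];	/* CPU view of slot i's packet */
	void *srb[TW_Q_LENGTH];			/* SCSI command owning slot i */
	unsigned char free_queue[TW_Q_LENGTH];	/* ring of unused request ids */
	int free_head;
	int state[TW_Q_LENGTH];			/* per-slot lifecycle state */
};

/* Claim a free request id and bind a command to it. */
static int demo_claim_slot(struct demo_dev *dev, void *cmd)
{
	int request_id = dev->free_queue[dev->free_head];

	dev->free_head = (dev->free_head + 1) % TW_Q_LENGTH;
	dev->srb[request_id] = cmd;
	dev->state[request_id] = 1;	/* "started"; the drivers use named states */
	return request_id;
}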
tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
for (i = 0; i < TW_Q_LENGTH; i++) {
ret = memory_read_from_buffer(outbuf, count, &offset, tw_dev->event_queue[0], sizeof(TW_Event) * TW_Q_LENGTH);
for (i = 0; i < TW_Q_LENGTH; i++) {
while (i < TW_Q_LENGTH) {
for (i = 0; i < TW_Q_LENGTH; i++) {
for (i = 0; i < TW_Q_LENGTH; i++) {
.can_queue = TW_Q_LENGTH - 2,
tw_dev->error_index = (tw_dev->error_index + 1) % TW_Q_LENGTH;
tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
size * TW_Q_LENGTH, &dma_handle,
for (i = 0; i < TW_Q_LENGTH; i++) {
sizeof(TW_Command_Full) * TW_Q_LENGTH,
TW_SECTOR_SIZE * TW_Q_LENGTH,
TW_Q_LENGTH,
unsigned long *generic_buffer_virt[TW_Q_LENGTH];
dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
dma_addr_t command_packet_phys[TW_Q_LENGTH];
TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
struct scsi_cmnd *srb[TW_Q_LENGTH];
unsigned char free_queue[TW_Q_LENGTH];
int state[TW_Q_LENGTH];
TW_Event *event_queue[TW_Q_LENGTH];
sizeof(TW_Command) * TW_Q_LENGTH,
sizeof(TW_Sector) * TW_Q_LENGTH,
for (i = 0; i < TW_Q_LENGTH; i++) {
for (i = 0; i < TW_Q_LENGTH; i++) {
for (i = 0; i < TW_Q_LENGTH; i++) {
if (tw_dev->pending_head == TW_Q_LENGTH - 1) {
.can_queue = TW_Q_LENGTH - 2,
if (tw_dev->pending_tail == TW_Q_LENGTH - 1) {
tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
if (tw_dev->aen_tail == TW_Q_LENGTH - 1) {
if (tw_dev->aen_head == TW_Q_LENGTH - 1) {
if (tw_dev->aen_tail == TW_Q_LENGTH - 1) {
if (tw_dev->aen_head == TW_Q_LENGTH - 1) {
size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
memset(cpu_addr, 0, size * TW_Q_LENGTH);
for (i = 0; i < TW_Q_LENGTH; i++) {
if (tw_dev->aen_head == TW_Q_LENGTH - 1) {
unsigned long *alignment_virtual_address[TW_Q_LENGTH];
unsigned long alignment_physical_address[TW_Q_LENGTH];
unsigned long *command_packet_virtual_address[TW_Q_LENGTH];
unsigned long command_packet_physical_address[TW_Q_LENGTH];
struct scsi_cmnd *srb[TW_Q_LENGTH];
unsigned char free_queue[TW_Q_LENGTH];
unsigned char pending_queue[TW_Q_LENGTH];
TW_Cmd_State state[TW_Q_LENGTH];
unsigned short aen_queue[TW_Q_LENGTH];
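Unlike the modulo wraps earlier, the aen_head/aen_tail tests above wrap the oldest driver's AEN ring with an explicit compare-and-reset; the two styles are equivalent for a fixed-length ring. A minimal standalone sketch of the compare style (demo_aen_push and the AEN code are illustrative; aen_queue mirrors the field above):

#include <stdio.h>

#define TW_Q_LENGTH 256	/* illustrative; the driver headers define the real value */

static unsigned short aen_queue[TW_Q_LENGTH];
static int aen_head;

/* Append an AEN code, wrapping with an explicit compare instead of %. */
static void demo_aen_push(unsigned short aen)
{
	aen_queue[aen_head] = aen;
	if (aen_head == TW_Q_LENGTH - 1)
		aen_head = 0;
	else
		aen_head++;
}

int main(void)
{
	demo_aen_push(0x0021);	/* hypothetical AEN code */
	printf("aen_head is now %d\n", aen_head);
	return 0;
}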