1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef _SCSI_SCSI_HOST_H
3 : #define _SCSI_SCSI_HOST_H
4 :
5 : #include <linux/device.h>
6 : #include <linux/list.h>
7 : #include <linux/types.h>
8 : #include <linux/workqueue.h>
9 : #include <linux/mutex.h>
10 : #include <linux/seq_file.h>
11 : #include <linux/blk-mq.h>
12 : #include <scsi/scsi.h>
13 :
14 : struct block_device;
15 : struct completion;
16 : struct module;
17 : struct scsi_cmnd;
18 : struct scsi_device;
19 : struct scsi_host_cmd_pool;
20 : struct scsi_target;
21 : struct Scsi_Host;
23 : struct scsi_transport_template;
24 :
25 :
26 : #define SG_ALL SG_CHUNK_SIZE
27 :
28 : #define MODE_UNKNOWN 0x00
29 : #define MODE_INITIATOR 0x01
30 : #define MODE_TARGET 0x02
31 :
32 : struct scsi_host_template {
33 : struct module *module;
34 : const char *name;
35 :
36 : /*
37 : * The info function will return whatever useful information the
38 : * developer sees fit. If not provided, then the name field will
39 : * be used instead.
40 : *
41 : * Status: OPTIONAL
42 : */
43 : const char *(* info)(struct Scsi_Host *);
44 :
45 : /*
46 : * Ioctl interface
47 : *
48 : * Status: OPTIONAL
49 : */
50 : int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
51 : void __user *arg);
52 :
53 :
54 : #ifdef CONFIG_COMPAT
55 : /*
56 : * Compat handler. Handle the 32-bit ABI.
57 : * When unknown ioctl is passed return -ENOIOCTLCMD.
58 : *
59 : * Status: OPTIONAL
60 : */
61 : int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
62 : void __user *arg);
63 : #endif
64 :
65 : int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
66 : int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
67 :
68 : /*
69 : * The queuecommand function is used to queue up a scsi
70 : * command block to the LLDD. When the driver has finished
71 : * processing the command, the done callback is invoked.
72 : *
73 : * If queuecommand returns 0, then the driver has accepted the
74 : * command. It must also push it to the HBA if the scsi_cmnd
75 : * flag SCMD_LAST is set, or if the driver does not implement
76 : * commit_rqs. The done() function must be called on the command
77 : * when the driver has finished with it. (you may call done on the
78 : * command before queuecommand returns, but in this case you
79 : * *must* return 0 from queuecommand).
80 : *
81 : * Queuecommand may also reject the command, in which case it may
82 : * not touch the command and must not call done() for it.
83 : *
84 : * There are two possible rejection returns:
85 : *
86 : * SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
87 : * allow commands to other devices serviced by this host.
88 : *
89 : * SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
90 : * host temporarily.
91 : *
92 : * For compatibility, any other non-zero return is treated the
93 : * same as SCSI_MLQUEUE_HOST_BUSY.
94 : *
95 : * NOTE: "temporarily" means either until the next command for
96 : * this device/host completes, or a period of time determined by
97 : * I/O pressure in the system if there are no other outstanding
98 : * commands.
99 : *
100 : * STATUS: REQUIRED
101 : */
102 : int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
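/*
 * A minimal sketch of a queuecommand implementation, assuming
 * hypothetical foo_* driver helpers (not part of this header): accept
 * the command, or report a busy condition, or complete it immediately
 * with an error through the done callback while still returning 0.
 *
 *	static int foo_queuecommand(struct Scsi_Host *shost,
 *				    struct scsi_cmnd *cmd)
 *	{
 *		struct foo_hba *hba = shost_priv(shost);
 *
 *		if (foo_hw_queue_full(hba))
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *
 *		if (foo_hw_submit(hba, cmd) < 0) {
 *			cmd->result = DID_ERROR << 16;
 *			cmd->scsi_done(cmd);
 *		}
 *		return 0;
 *	}
 */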
103 :
104 : /*
105 : * The commit_rqs function is used to trigger a hardware
106 : * doorbell after some requests have been queued with
107 : * queuecommand, when an error is encountered before sending
108 : * the request with SCMD_LAST set.
109 : *
110 : * STATUS: OPTIONAL
111 : */
112 : void (*commit_rqs)(struct Scsi_Host *, u16);
113 :
114 : /*
115 : * This is an error handling strategy routine. You don't need to
116 : * define one of these if you don't want to - there is a default
117 : * routine that is present that should work in most cases. For those
118 : * driver authors that have the inclination and ability to write their
119 : * own strategy routine, this is where it is specified. Note - the
120 : * strategy routine is *ALWAYS* run in the context of the kernel eh
121 : * thread. Thus you are guaranteed to *NOT* be in an interrupt
122 : * handler when you execute this, and you are also guaranteed to
123 : * *NOT* have any other commands being queued while you are in the
124 : * strategy routine. When you return from this function, operations
125 : * return to normal.
126 : *
127 : * See scsi_error.c scsi_unjam_host for additional comments about
128 : * what this function should and should not be attempting to do.
129 : *
130 : * Status: REQUIRED (at least one of them)
131 : */
132 : int (* eh_abort_handler)(struct scsi_cmnd *);
133 : int (* eh_device_reset_handler)(struct scsi_cmnd *);
134 : int (* eh_target_reset_handler)(struct scsi_cmnd *);
135 : int (* eh_bus_reset_handler)(struct scsi_cmnd *);
136 : int (* eh_host_reset_handler)(struct scsi_cmnd *);
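/*
 * A minimal sketch of an abort handler, assuming a hypothetical
 * foo_hw_abort() helper; error handlers report the outcome with the
 * SUCCESS and FAILED codes from <scsi/scsi.h>.
 *
 *	static int foo_eh_abort_handler(struct scsi_cmnd *cmd)
 *	{
 *		struct foo_hba *hba = shost_priv(cmd->device->host);
 *
 *		return foo_hw_abort(hba, cmd) ? FAILED : SUCCESS;
 *	}
 */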
137 :
138 : /*
139 : * Before the mid layer attempts to scan for a new device where none
140 : * currently exists, it will call this entry in your driver. Should
141 : * your driver need to allocate any structs or perform any other init
142 : * items in order to send commands to a currently unused target/lun
143 : * combo, then this is where you can perform those allocations. This
144 : * is specifically so that drivers won't have to perform any kind of
145 : * "is this a new device" checks in their queuecommand routine,
146 : * thereby making the hot path a bit quicker.
147 : *
148 : * Return values: 0 on success, non-0 on failure
149 : *
150 : * Deallocation: If we didn't find any devices at this ID, you will
151 : * get an immediate call to slave_destroy(). If we find something
152 : * here then you will get a call to slave_configure(), then the
153 : * device will be used for however long it is kept around, then when
154 : * the device is removed from the system (or possibly at reboot
155 : * time), you will then get a call to slave_destroy(). This is
156 : * assuming you implement slave_configure and slave_destroy.
157 : * However, if you allocate memory and hang it off the device struct,
158 : * then you must implement the slave_destroy() routine at a minimum
159 : * in order to avoid leaking memory each time a device is torn
160 : * down.
161 : *
162 : * Status: OPTIONAL
163 : */
164 : int (* slave_alloc)(struct scsi_device *);
165 :
166 : /*
167 : * Once the device has responded to an INQUIRY and we know the
168 : * device is online, we call into the low level driver with the
169 : * struct scsi_device *. If the low level device driver implements
170 : * this function, it *must* perform the task of setting the queue
171 : * depth on the device. All other tasks are optional and depend
172 : * on what the driver supports and various implementation details.
173 : *
174 : * Things currently recommended to be handled at this time include:
175 : *
176 : * 1. Setting the device queue depth. Proper setting of this is
177 : * described in the comments for scsi_change_queue_depth.
178 : * 2. Determining if the device supports the various synchronous
179 : * negotiation protocols. The device struct will already have
180 : * responded to INQUIRY and the results of the standard items
181 : * will have been shoved into the various device flag bits, e.g.
182 : * device->sdtr will be true if the device supports SDTR messages.
183 : * 3. Allocating command structs that the device will need.
184 : * 4. Setting the default timeout on this device (if needed).
185 : * 5. Anything else the low level driver might want to do on a device
186 : * specific setup basis...
187 : * 6. Return 0 on success, non-0 on error. The device will be marked
188 : * as offline on error so that no access will occur. If you return
189 : * non-0, your slave_destroy routine will never get called for this
190 : * device, so don't leave any loose memory hanging around, clean
191 : * up after yourself before returning non-0.
192 : *
193 : * Status: OPTIONAL
194 : */
195 : int (* slave_configure)(struct scsi_device *);
196 :
197 : /*
198 : * Immediately prior to deallocating the device and after all activity
199 : * has ceased, the mid layer calls this point so that the low level
200 : * driver may completely detach itself from the scsi device and vice
201 : * versa. The low level driver is responsible for freeing any memory
202 : * it allocated in the slave_alloc or slave_configure calls.
203 : *
204 : * Status: OPTIONAL
205 : */
206 : void (* slave_destroy)(struct scsi_device *);
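/*
 * A sketch of the slave_* lifecycle described above, with hypothetical
 * foo_* names: per-LUN data is allocated in slave_alloc, the queue
 * depth is set in slave_configure, and everything is freed again in
 * slave_destroy.
 *
 *	static int foo_slave_alloc(struct scsi_device *sdev)
 *	{
 *		sdev->hostdata = kzalloc(sizeof(struct foo_lun), GFP_KERNEL);
 *		return sdev->hostdata ? 0 : -ENOMEM;
 *	}
 *
 *	static int foo_slave_configure(struct scsi_device *sdev)
 *	{
 *		scsi_change_queue_depth(sdev, FOO_QUEUE_DEPTH);
 *		return 0;
 *	}
 *
 *	static void foo_slave_destroy(struct scsi_device *sdev)
 *	{
 *		kfree(sdev->hostdata);
 *		sdev->hostdata = NULL;
 *	}
 */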
207 :
208 : /*
209 : * Before the mid layer attempts to scan for a new device attached
210 : * to a target where no target currently exists, it will call this
211 : * entry in your driver. Should your driver need to allocate any
212 : * structs or perform any other init items in order to send commands
213 : * to a currently unused target, then this is where you can perform
214 : * those allocations.
215 : *
216 : * Return values: 0 on success, non-0 on failure
217 : *
218 : * Status: OPTIONAL
219 : */
220 : int (* target_alloc)(struct scsi_target *);
221 :
222 : /*
223 : * Immediately prior to deallocating the target structure, and
224 : * after all activity to attached scsi devices has ceased, the
225 : * midlayer calls this point so that the driver may deallocate
226 : * and terminate any references to the target.
227 : *
228 : * Status: OPTIONAL
229 : */
230 : void (* target_destroy)(struct scsi_target *);
231 :
232 : /*
233 : * If a host has the ability to discover targets on its own instead
234 : * of scanning the entire bus, it can fill in this function and
235 : * call scsi_scan_host(). This function will then be called
236 : * periodically, with the Scsi_Host and the elapsed scan time in
237 : * jiffies, until it returns 1 to indicate the scan has finished.
238 : *
239 : * Status: OPTIONAL
240 : */
241 : int (* scan_finished)(struct Scsi_Host *, unsigned long);
242 :
243 : /*
244 : * If the host wants to be called before the scan starts, but
245 : * after the midlayer has set up ready for the scan, it can fill
246 : * in this function.
247 : *
248 : * Status: OPTIONAL
249 : */
250 : void (* scan_start)(struct Scsi_Host *);
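/*
 * A sketch of an asynchronous scan pair, assuming hypothetical
 * foo_hw_* helpers: scan_start kicks off discovery and scan_finished
 * polls for completion, giving up after ten seconds.
 *
 *	static void foo_scan_start(struct Scsi_Host *shost)
 *	{
 *		foo_hw_start_discovery(shost_priv(shost));
 *	}
 *
 *	static int foo_scan_finished(struct Scsi_Host *shost,
 *				     unsigned long elapsed)
 *	{
 *		if (elapsed > 10 * HZ)
 *			return 1;
 *		return foo_hw_discovery_done(shost_priv(shost));
 *	}
 */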
251 :
252 : /*
253 : * Fill in this function to allow the queue depth of this host
254 : * to be changeable (on a per device basis). Returns either
255 : * the current queue depth setting (may be different from what
256 : * was passed in) or an error. An error should only be
257 : * returned if the requested depth is legal but the driver was
258 : * unable to set it. If the requested depth is illegal, the
259 : * driver should set and return the closest legal queue depth.
260 : *
261 : * Status: OPTIONAL
262 : */
263 : int (* change_queue_depth)(struct scsi_device *, int);
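/*
 * A sketch of a change_queue_depth implementation; FOO_MAX_QUEUE_DEPTH
 * is a hypothetical hardware limit. Most drivers clamp the request and
 * let scsi_change_queue_depth() do the rest.
 *
 *	static int foo_change_queue_depth(struct scsi_device *sdev,
 *					  int depth)
 *	{
 *		if (depth > FOO_MAX_QUEUE_DEPTH)
 *			depth = FOO_MAX_QUEUE_DEPTH;
 *		return scsi_change_queue_depth(sdev, depth);
 *	}
 */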
264 :
265 : /*
266 : * This function lets the driver expose the queue mapping
267 : * to the block layer.
268 : *
269 : * Status: OPTIONAL
270 : */
271 : int (* map_queues)(struct Scsi_Host *shost);
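/*
 * A sketch of map_queues for a PCI HBA with one MSI-X vector per
 * hardware queue, deferring to the block layer helper from
 * <linux/blk-mq-pci.h>; foo_hba and its pdev member are hypothetical.
 *
 *	static int foo_map_queues(struct Scsi_Host *shost)
 *	{
 *		struct foo_hba *hba = shost_priv(shost);
 *
 *		return blk_mq_pci_map_queues(
 *				&shost->tag_set.map[HCTX_TYPE_DEFAULT],
 *				hba->pdev, 0);
 *	}
 */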
272 :
273 : /*
274 : * Check if scatterlists need to be padded for DMA draining.
275 : *
276 : * Status: OPTIONAL
277 : */
278 : bool (* dma_need_drain)(struct request *rq);
279 :
280 : /*
281 : * This function determines the BIOS parameters for a given
282 : * hard disk. These tend to be numbers that are made up by
283 : * the host adapter. Parameters:
284 : * size, device, list (heads, sectors, cylinders)
285 : *
286 : * Status: OPTIONAL
287 : */
288 : int (* bios_param)(struct scsi_device *, struct block_device *,
289 : sector_t, int []);
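/*
 * A sketch of a bios_param implementation fabricating a common
 * 64-head, 32-sector geometry; geom[] is filled with heads, sectors
 * and cylinders, in that order.
 *
 *	static int foo_bios_param(struct scsi_device *sdev,
 *				  struct block_device *bdev,
 *				  sector_t capacity, int geom[])
 *	{
 *		geom[0] = 64;
 *		geom[1] = 32;
 *		sector_div(capacity, 64 * 32);
 *		geom[2] = capacity;
 *		return 0;
 *	}
 */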
290 :
291 : /*
292 : * This function is called when one or more partitions on the
293 : * device reach beyond the end of the device.
294 : *
295 : * Status: OPTIONAL
296 : */
297 : void (*unlock_native_capacity)(struct scsi_device *);
298 :
299 : /*
300 : * Can be used to export driver statistics and other information to
301 : * the world outside the kernel (i.e. userspace), and it also
302 : * provides an interface to feed the driver with information.
303 : *
304 : * Status: OBSOLETE
305 : */
306 : int (*show_info)(struct seq_file *, struct Scsi_Host *);
307 : int (*write_info)(struct Scsi_Host *, char *, int);
308 :
309 : /*
310 : * This is an optional routine that allows the transport to become
311 : * involved when a SCSI I/O timer fires. The return value tells the
312 : * timer routine how to finish the I/O timeout handling.
313 : *
314 : * Status: OPTIONAL
315 : */
316 : enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
317 : /*
318 : * Optional routine that allows the transport to decide if a cmd
319 : * is retryable. Return true if the transport is in a state in
320 : * which the cmd should be retried.
321 : */
322 : bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);
323 :
324 : /* This is an optional routine that allows transport to initiate
325 : * LLD adapter or firmware reset using sysfs attribute.
326 : *
327 : * Return values: 0 on success, negative value on failure.
328 : *
329 : * Status: OPTIONAL
330 : */
331 :
332 : int (*host_reset)(struct Scsi_Host *shost, int reset_type);
333 : #define SCSI_ADAPTER_RESET 1
334 : #define SCSI_FIRMWARE_RESET 2
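/*
 * A sketch of a host_reset implementation dispatching on reset_type;
 * the foo_hw_* helpers are hypothetical.
 *
 *	static int foo_host_reset(struct Scsi_Host *shost, int reset_type)
 *	{
 *		struct foo_hba *hba = shost_priv(shost);
 *
 *		switch (reset_type) {
 *		case SCSI_ADAPTER_RESET:
 *			return foo_hw_reset_adapter(hba);
 *		case SCSI_FIRMWARE_RESET:
 *			return foo_hw_reset_firmware(hba);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */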
335 :
336 :
337 : /*
338 : * Name of proc directory
339 : */
340 : const char *proc_name;
341 :
342 : /*
343 : * Used to store the procfs directory if a driver implements the
344 : * show_info method.
345 : */
346 : struct proc_dir_entry *proc_dir;
347 :
348 : /*
349 : * This determines if we will use a non-interrupt driven
350 : * or an interrupt driven scheme. It is set to the maximum number
351 : * of simultaneous commands a single hardware queue in the HBA will accept.
352 : */
353 : int can_queue;
354 :
355 : /*
356 : * In many instances, especially where disconnect / reconnect are
357 : * supported, our host also has an ID on the SCSI bus. If this is
358 : * the case, then it must be reserved. Please set this_id to -1 if
359 : * your setup is in single initiator mode, and the host lacks an
360 : * ID.
361 : */
362 : int this_id;
363 :
364 : /*
365 : * This determines the degree to which the host adapter is capable
366 : * of scatter-gather.
367 : */
368 : unsigned short sg_tablesize;
369 : unsigned short sg_prot_tablesize;
370 :
371 : /*
372 : * Set this if the host adapter has limitations beside segment count.
373 : */
374 : unsigned int max_sectors;
375 :
376 : /*
377 : * Maximum size in bytes of a single segment.
378 : */
379 : unsigned int max_segment_size;
380 :
381 : /*
382 : * DMA scatter gather segment boundary limit. A segment crossing this
383 : * boundary will be split in two.
384 : */
385 : unsigned long dma_boundary;
386 :
387 : unsigned long virt_boundary_mask;
388 :
389 : /*
390 : * This specifies "machine infinity" for host templates which don't
391 : * limit the transfer size. Note this limit represents an absolute
392 : * maximum, and may be over the transfer limits allowed for
393 : * individual devices (e.g. 256 for SCSI-1).
394 : */
395 : #define SCSI_DEFAULT_MAX_SECTORS 1024
396 :
397 : /*
398 : * True if this host adapter can make good use of linked commands.
399 : * This will allow more than one command to be queued to a given
400 : * unit on a given host. Set this to the maximum number of command
401 : * blocks to be provided for each device. Set this to 1 for one
402 : * command block per lun, 2 for two, etc. Do not set this to 0.
403 : * You should make sure that the host adapter will do the right thing
404 : * before you try setting this above 1.
405 : */
406 : short cmd_per_lun;
407 :
408 : /*
409 : * present contains a counter indicating how many boards of this
410 : * type were found when we did the scan.
411 : */
412 : unsigned char present;
413 :
414 : /* If the block layer is used to manage tags, this is the tag allocation policy */
415 : int tag_alloc_policy;
416 :
417 : /*
418 : * Track QUEUE_FULL events and reduce queue depth on demand.
419 : */
420 : unsigned track_queue_depth:1;
421 :
422 : /*
423 : * This specifies the mode that a LLD supports.
424 : */
425 : unsigned supported_mode:2;
426 :
427 : /*
428 : * True if this host adapter uses unchecked DMA onto an ISA bus.
429 : */
430 : unsigned unchecked_isa_dma:1;
431 :
432 : /*
433 : * True for emulated SCSI host adapters (e.g. ATAPI).
434 : */
435 : unsigned emulated:1;
436 :
437 : /*
438 : * True if the low-level driver performs its own reset-settle delays.
439 : */
440 : unsigned skip_settle_delay:1;
441 :
442 : /* True if the controller does not support WRITE SAME */
443 : unsigned no_write_same:1;
444 :
445 : /* True if the host uses host-wide tagspace */
446 : unsigned host_tagset:1;
447 :
448 : /*
449 : * Countdown for host blocking with no commands outstanding.
450 : */
451 : unsigned int max_host_blocked;
452 :
453 : /*
454 : * Default value for host blocking. If the queue is empty,
455 : * host_blocked counts down in the request_fn until it reaches
456 : * zero, at which point host operations are restarted.
457 : *
458 : * FIXME: This should probably be a value in the template
459 : */
460 : #define SCSI_DEFAULT_HOST_BLOCKED 7
461 :
462 : /*
463 : * Pointer to the sysfs class properties for this host, NULL terminated.
464 : */
465 : struct device_attribute **shost_attrs;
466 :
467 : /*
468 : * Pointer to the SCSI device properties for this host, NULL terminated.
469 : */
470 : struct device_attribute **sdev_attrs;
471 :
472 : /*
473 : * Pointer to the SCSI device attribute groups for this host,
474 : * NULL terminated.
475 : */
476 : const struct attribute_group **sdev_groups;
477 :
478 : /*
479 : * Vendor Identifier associated with the host
480 : *
481 : * Note: When specifying vendor_id, be sure to read the
482 : * Vendor Type and ID formatting requirements specified in
483 : * scsi_netlink.h
484 : */
485 : u64 vendor_id;
486 :
487 : /*
488 : * Additional per-command data allocated for the driver.
489 : */
490 : unsigned int cmd_size;
491 : struct scsi_host_cmd_pool *cmd_pool;
492 :
493 : /* Delay for runtime autosuspend */
494 : int rpm_autosuspend_delay;
495 : };
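/*
 * A sketch of a minimal template initialization tying the hooks above
 * together; the foo_* handlers and the chosen limits are hypothetical.
 *
 *	static struct scsi_host_template foo_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "foo",
 *		.proc_name		= "foo",
 *		.queuecommand		= foo_queuecommand,
 *		.eh_host_reset_handler	= foo_eh_host_reset_handler,
 *		.this_id		= -1,
 *		.can_queue		= 64,
 *		.cmd_per_lun		= 8,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *	};
 */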
496 :
497 : /*
498 : * Temporary #define for host lock push down. Can be removed when all
499 : * drivers have been updated to take advantage of unlocked
500 : * queuecommand.
501 : *
502 : */
503 : #define DEF_SCSI_QCMD(func_name) \
504 : int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \
505 : { \
506 : unsigned long irq_flags; \
507 : int rc; \
508 : spin_lock_irqsave(shost->host_lock, irq_flags); \
509 : rc = func_name##_lck (cmd, cmd->scsi_done); \
510 : spin_unlock_irqrestore(shost->host_lock, irq_flags); \
511 : return rc; \
512 : }
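/*
 * A sketch of how the macro is used: the driver supplies a _lck
 * variant that runs under the host lock, and DEF_SCSI_QCMD emits the
 * locked wrapper with the plain name. foo_queuecommand_lck and its
 * trivial body are hypothetical.
 *
 *	static int foo_queuecommand_lck(struct scsi_cmnd *cmd,
 *				void (*done)(struct scsi_cmnd *))
 *	{
 *		cmd->result = DID_NO_CONNECT << 16;
 *		done(cmd);
 *		return 0;
 *	}
 *
 *	static DEF_SCSI_QCMD(foo_queuecommand)
 */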
513 :
514 :
515 : /*
516 : * shost state: If you alter this, you also need to alter scsi_sysfs.c
517 : * (for the ascii descriptions) and the state model enforcer:
518 : * scsi_host_set_state()
519 : */
520 : enum scsi_host_state {
521 : SHOST_CREATED = 1,
522 : SHOST_RUNNING,
523 : SHOST_CANCEL,
524 : SHOST_DEL,
525 : SHOST_RECOVERY,
526 : SHOST_CANCEL_RECOVERY,
527 : SHOST_DEL_RECOVERY,
528 : };
529 :
530 : struct Scsi_Host {
531 : /*
532 : * __devices is protected by the host_lock, but you should
533 : * usually use scsi_device_lookup / shost_for_each_device
534 : * to access it, so you need not handle the locking yourself.
535 : * In the rare case of being in irq context you can use
536 : * their __ prefixed variants with the lock held. NEVER
537 : * access this list directly from a driver.
538 : */
539 : struct list_head __devices;
540 : struct list_head __targets;
541 :
542 : struct list_head starved_list;
543 :
544 : spinlock_t default_lock;
545 : spinlock_t *host_lock;
546 :
547 : struct mutex scan_mutex;/* serialize scanning activity */
548 :
549 : struct list_head eh_cmd_q;
550 : struct task_struct * ehandler; /* Error recovery thread. */
551 : struct completion * eh_action; /* Wait for specific actions on the
552 : host. */
553 : wait_queue_head_t host_wait;
554 : struct scsi_host_template *hostt;
555 : struct scsi_transport_template *transportt;
556 :
557 : /* Area to keep a shared tag map */
558 : struct blk_mq_tag_set tag_set;
559 :
560 : atomic_t host_blocked;
561 :
562 : unsigned int host_failed; /* commands that failed.
563 : protected by host_lock */
564 : unsigned int host_eh_scheduled; /* EH scheduled without command */
565 :
566 : unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
567 :
568 : /* next two fields are used to bound the time spent in error handling */
569 : int eh_deadline;
570 : unsigned long last_reset;
571 :
572 :
573 : /*
574 : * These three parameters can be used to allow for wide scsi,
575 : * and for host adapters that support multiple busses
576 : * The last two should be set to 1 more than the actual max id
577 : * or lun (e.g. 8 for SCSI parallel systems).
578 : */
579 : unsigned int max_channel;
580 : unsigned int max_id;
581 : u64 max_lun;
582 :
583 : /*
584 : * This is a unique identifier that must be assigned so that we
585 : * have some way of identifying each detected host adapter properly
586 : * and uniquely. For hosts that do not support more than one card
587 : * in the system at one time, this does not need to be set. It is
588 : * initialized to 0 in scsi_register.
589 : */
590 : unsigned int unique_id;
591 :
592 : /*
593 : * The maximum length of SCSI commands that this host can accept.
594 : * Probably 12 for most host adapters, but could be 16 for others,
595 : * or 260 if the driver supports variable length CDBs.
596 : * For drivers that don't set this field, a value of 12 is
597 : * assumed.
598 : */
599 : unsigned short max_cmd_len;
600 :
601 : int this_id;
602 : int can_queue;
603 : short cmd_per_lun;
604 : short unsigned int sg_tablesize;
605 : short unsigned int sg_prot_tablesize;
606 : unsigned int max_sectors;
607 : unsigned int max_segment_size;
608 : unsigned long dma_boundary;
609 : unsigned long virt_boundary_mask;
610 : /*
611 : * In scsi-mq mode, the number of hardware queues supported by the LLD.
612 : *
613 : * Note: it is assumed that each hardware queue has a queue depth of
614 : * can_queue. In other words, the total queue depth per host
615 : * is nr_hw_queues * can_queue. However, when host_tagset is set,
616 : * the total queue depth is can_queue.
617 : */
618 : unsigned nr_hw_queues;
619 : unsigned active_mode:2;
620 : unsigned unchecked_isa_dma:1;
621 :
622 : /*
623 : * Host has requested that no further requests come through for the
624 : * time being.
625 : */
626 : unsigned host_self_blocked:1;
627 :
628 : /*
629 : * Host uses correct SCSI ordering not PC ordering. The bit is
630 : * set for the minority of drivers whose authors actually read
631 : * the spec ;).
632 : */
633 : unsigned reverse_ordering:1;
634 :
635 : /* Task mgmt function in progress */
636 : unsigned tmf_in_progress:1;
637 :
638 : /* Asynchronous scan in progress */
639 : unsigned async_scan:1;
640 :
641 : /* Don't resume host in EH */
642 : unsigned eh_noresume:1;
643 :
644 : /* The controller does not support WRITE SAME */
645 : unsigned no_write_same:1;
646 :
647 : /* True if the host uses host-wide tagspace */
648 : unsigned host_tagset:1;
649 :
650 : /* Host responded with short (<36 bytes) INQUIRY result */
651 : unsigned short_inquiry:1;
652 :
653 : /* The transport requires the LUN bits NOT to be stored in CDB[1] */
654 : unsigned no_scsi2_lun_in_cdb:1;
655 :
656 : /*
657 : * Optional work queue to be utilized by the transport
658 : */
659 : char work_q_name[20];
660 : struct workqueue_struct *work_q;
661 :
662 : /*
663 : * Task management function work queue
664 : */
665 : struct workqueue_struct *tmf_work_q;
666 :
667 : /*
668 : * Value host_blocked counts down from
669 : */
670 : unsigned int max_host_blocked;
671 :
672 : /* Protection Information */
673 : unsigned int prot_capabilities;
674 : unsigned char prot_guard_type;
675 :
676 : /* legacy crap */
677 : unsigned long base;
678 : unsigned long io_port;
679 : unsigned char n_io_port;
680 : unsigned char dma_channel;
681 : unsigned int irq;
682 :
683 :
684 : enum scsi_host_state shost_state;
685 :
686 : /* ldm bits */
687 : struct device shost_gendev, shost_dev;
688 :
689 : /*
690 : * Points to the transport data (if any) which is allocated
691 : * separately
692 : */
693 : void *shost_data;
694 :
695 : /*
696 : * Points to the physical bus device we'd use to do DMA.
697 : * Needed just in case we have virtual hosts.
698 : */
699 : struct device *dma_dev;
700 :
701 : /*
702 : * We should ensure that this is aligned, both for better performance
703 : * and also because some compilers (m68k) don't automatically force
704 : * alignment to a long boundary.
705 : */
706 : unsigned long hostdata[] /* Used for storage of host specific stuff */
707 : __attribute__ ((aligned (sizeof(unsigned long))));
708 : };
709 :
710 : #define class_to_shost(d) \
711 : container_of(d, struct Scsi_Host, shost_dev)
712 :
713 : #define shost_printk(prefix, shost, fmt, a...) \
714 : dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)
715 :
716 0 : static inline void *shost_priv(struct Scsi_Host *shost)
717 : {
718 0 : return (void *)shost->hostdata;
719 : }
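/*
 * A sketch of how hostdata is sized and retrieved; struct foo_hba is
 * hypothetical. The private area is allocated together with the host
 * by passing its size to scsi_host_alloc().
 *
 *	struct Scsi_Host *shost =
 *		scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	struct foo_hba *hba = shost_priv(shost);
 */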
720 :
721 : int scsi_is_host_device(const struct device *);
722 :
723 0 : static inline struct Scsi_Host *dev_to_shost(struct device *dev)
724 : {
725 0 : while (!scsi_is_host_device(dev)) {
726 0 : if (!dev->parent)
727 : return NULL;
728 : dev = dev->parent;
729 : }
730 0 : return container_of(dev, struct Scsi_Host, shost_gendev);
731 : }
732 :
733 0 : static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
734 : {
735 0 : return shost->shost_state == SHOST_RECOVERY ||
736 0 : shost->shost_state == SHOST_CANCEL_RECOVERY ||
737 0 : shost->shost_state == SHOST_DEL_RECOVERY ||
738 : shost->tmf_in_progress;
739 : }
740 :
741 : extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
742 : extern void scsi_flush_work(struct Scsi_Host *);
743 :
744 : extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
745 : extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
746 : struct device *,
747 : struct device *);
748 : extern void scsi_scan_host(struct Scsi_Host *);
749 : extern void scsi_rescan_device(struct device *);
750 : extern void scsi_remove_host(struct Scsi_Host *);
751 : extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
752 : extern int scsi_host_busy(struct Scsi_Host *shost);
753 : extern void scsi_host_put(struct Scsi_Host *t);
754 : extern struct Scsi_Host *scsi_host_lookup(unsigned short);
755 : extern const char *scsi_host_state_name(enum scsi_host_state);
756 : extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
757 : int status);
758 :
759 0 : static inline int __must_check scsi_add_host(struct Scsi_Host *host,
760 : struct device *dev)
761 : {
762 0 : return scsi_add_host_with_dma(host, dev, dev);
763 : }
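/*
 * A sketch of the usual probe/remove sequence built from the calls
 * above; foo_template, the limits, and pdev are hypothetical, and
 * error unwinding is abbreviated.
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	shost->max_id = FOO_MAX_TARGETS;
 *	shost->max_lun = FOO_MAX_LUNS;
 *	err = scsi_add_host(shost, &pdev->dev);
 *	if (err) {
 *		scsi_host_put(shost);
 *		return err;
 *	}
 *	scsi_scan_host(shost);
 *
 * and on teardown:
 *
 *	scsi_remove_host(shost);
 *	scsi_host_put(shost);
 */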
764 :
765 0 : static inline struct device *scsi_get_device(struct Scsi_Host *shost)
766 : {
767 0 : return shost->shost_gendev.parent;
768 : }
769 :
770 : /**
771 : * scsi_host_scan_allowed - Is scanning of this host allowed
772 : * @shost: Pointer to Scsi_Host.
773 : **/
774 0 : static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
775 : {
776 0 : return shost->shost_state == SHOST_RUNNING ||
777 : shost->shost_state == SHOST_RECOVERY;
778 : }
779 :
780 : extern void scsi_unblock_requests(struct Scsi_Host *);
781 : extern void scsi_block_requests(struct Scsi_Host *);
782 : extern int scsi_host_block(struct Scsi_Host *shost);
783 : extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);
784 :
785 : void scsi_host_busy_iter(struct Scsi_Host *,
786 : bool (*fn)(struct scsi_cmnd *, void *, bool), void *priv);
787 :
788 : struct class_container;
789 :
790 : /*
791 : * These two functions are used to allocate and free a pseudo device
792 : * which will connect to the host adapter itself rather than any
793 : * physical device. You must deallocate it when you are done with
794 : * it. This pseudo-device isn't real and won't be available
795 : * from any high-level drivers.
796 : */
797 : extern void scsi_free_host_dev(struct scsi_device *);
798 : extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);
799 :
800 : /*
801 : * DIF defines the exchange of protection information between
802 : * initiator and SBC block device.
803 : *
804 : * DIX defines the exchange of protection information between OS and
805 : * initiator.
806 : */
807 : enum scsi_host_prot_capabilities {
808 : SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
809 : SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
810 : SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */
811 :
812 : SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
813 : SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
814 : SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
815 : SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
816 : };
817 :
818 : /*
819 : * SCSI hosts which support the Data Integrity Extensions must
820 : * indicate their capabilities by setting the prot_capabilities using
821 : * this call.
822 : */
823 : static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
824 : {
825 : shost->prot_capabilities = mask;
826 : }
827 :
828 0 : static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
829 : {
830 0 : return shost->prot_capabilities;
831 : }
832 :
833 0 : static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
834 : {
835 0 : return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
836 : }
837 :
838 0 : static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
839 : {
840 0 : static unsigned char cap[] = { 0,
841 : SHOST_DIF_TYPE1_PROTECTION,
842 : SHOST_DIF_TYPE2_PROTECTION,
843 : SHOST_DIF_TYPE3_PROTECTION };
844 :
845 0 : if (target_type >= ARRAY_SIZE(cap))
846 : return 0;
847 :
848 0 : return shost->prot_capabilities & cap[target_type] ? target_type : 0;
849 : }
850 :
851 0 : static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
852 : {
853 : #if defined(CONFIG_BLK_DEV_INTEGRITY)
854 : static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
855 : SHOST_DIX_TYPE1_PROTECTION,
856 : SHOST_DIX_TYPE2_PROTECTION,
857 : SHOST_DIX_TYPE3_PROTECTION };
858 :
859 : if (target_type >= ARRAY_SIZE(cap))
860 : return 0;
861 :
862 : return shost->prot_capabilities & cap[target_type];
863 : #endif
864 0 : return 0;
865 : }
866 :
867 : /*
868 : * All DIX-capable initiators must support the T10-mandated CRC
869 : * checksum. Controllers can optionally implement the IP checksum
870 : * scheme which has much lower impact on system performance. Note
871 : * that the main rationale for the checksum is to match integrity
872 : * metadata with data. Detecting bit errors is a job for ECC memory
873 : * and buses.
874 : */
875 :
876 : enum scsi_host_guard_type {
877 : SHOST_DIX_GUARD_CRC = 1 << 0,
878 : SHOST_DIX_GUARD_IP = 1 << 1,
879 : };
880 :
881 : static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
882 : {
883 : shost->prot_guard_type = type;
884 : }
885 :
886 : static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
887 : {
888 : return shost->prot_guard_type;
889 : }
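/*
 * A sketch of how a DIF/DIX capable driver announces its protection
 * support at probe time; the capability mix shown is hypothetical.
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */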
890 :
891 : extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);
892 :
893 : #endif /* _SCSI_SCSI_HOST_H */